diff --git a/deploy/kafka.sh b/deploy/kafka.sh index a971c15d5401d8928f35d6c33e57d59d7d636e95..6e6a0b358d88fc5f35ee7c79c6741fe4cf9c4ef5 100755 --- a/deploy/kafka.sh +++ b/deploy/kafka.sh @@ -61,7 +61,8 @@ function kfk_deploy_single() { else echo ">>> Deploy Kafka" cp "${KFK_MANIFESTS_PATH}/single-node.yaml" "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml" - #sed -i "s//${KFK_NAMESPACE}/" "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml" + # Set the correct advertised listeners based on the namespace + sed -i "s|kafka-service\.kafka\.svc\.cluster\.local|kafka-service.${KFK_NAMESPACE}.svc.cluster.local|g" "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml" kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/kfk_single_node.yaml" echo ">>> Waiting Kafka statefulset to be created..." diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index 165a5952635a7c72da86c4ba4a067306d09d7202..cdd7ec001984fa4b0a446e4b3e174e05ac691fd4 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -15,10 +15,39 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: tfs-ingress + name: tfs-ingress-web annotations: nginx.ingress.kubernetes.io/rewrite-target: "/$2" - + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive write operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive read operations +spec: + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: ImplementationSpecific + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: ImplementationSpecific + backend: + service: + name: webuiservice + port: + number: 3000 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-nbi + annotations: # Enable websocket services and configure sticky cookies (seems not to work) #nginx.org/websocket-services: "nbiservice" #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io" @@ -43,77 +72,63 @@ spec: rules: - http: paths: - - path: /webui(/|$)(.*) - pathType: Prefix - backend: - service: - name: webuiservice - port: - number: 8004 - - path: /grafana(/|$)(.*) - pathType: Prefix - backend: - service: - name: webuiservice - port: - number: 3000 - - path: /()(.well-known/.*) + - path: /.well-known pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(restconf/.*) + - path: /restconf pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(socket.io/.*) + - path: /socket.io pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(tfs-api/.*) + - path: /tfs-api pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(bmw/.*) + - path: /bmw pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(qkd_app/.*) + - path: /qkd_app pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(camara/.*) + - path: /camara pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(agent-probes/.*) + - path: 
/agent-probes pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(osm-api/.*) + - path: /osm-api pathType: Prefix backend: service: diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh index 6a6987020d05e6278c4fe9a8e641c58b7f5d391f..2f1a5127bbd6750e17e95f47a93571e6feca5eb4 100755 --- a/scripts/run_tests_locally-telemetry-backend.sh +++ b/scripts/run_tests_locally-telemetry-backend.sh @@ -38,7 +38,7 @@ export IP_CONTEXT echo "Context Service IP: ${IP_CONTEXT}" # Start Kafka port-forward in background -kubectl port-forward -n kafka service/kafka-public 9094:9094 > /dev/null 2>&1 & +kubectl port-forward -n kafka service/kafka-service 9094:9094 > /dev/null 2>&1 & KAFKA_PF_PID=$! # Function to cleanup port-forward on exit diff --git a/src/common/tools/context_queries/OpticalConfig.py b/src/common/tools/context_queries/OpticalConfig.py index 3e8a5380e7f1d46639a0890e43e89fe27f9ecd2d..7b4f6599e050f0a4b10dab36aa9b9700c3916ca8 100644 --- a/src/common/tools/context_queries/OpticalConfig.py +++ b/src/common/tools/context_queries/OpticalConfig.py @@ -13,12 +13,15 @@ # limitations under the License. -from common.method_wrappers.ServiceExceptions import InvalidArgumentsException -from context.client.ContextClient import ContextClient import logging from typing import Optional, Union from uuid import UUID, uuid4, uuid5 +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from common.proto.context_pb2 import OpticalBand, OpticalBandId, Empty +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + # Generate a UUIDv5-like from the SHA-1 of "TFS" and no namespace to be used as the NAMESPACE for all # the context UUIDs generated. 
For efficiency purposes, the UUID is hardcoded; however, it is produced # using the following code: @@ -98,14 +101,15 @@ def ob_get_uuid( -def find_optical_band (ob_index)->OpticalBand: - +def find_optical_band(ob_index) -> Optional[OpticalBand]: op_uuid = ob_get_uuid(ob_index) - op_id=OpticalBandId() - op_id.opticalband_uuid.uuid =op_uuid + op_id = OpticalBandId() + op_id.opticalband_uuid.uuid = op_uuid try: ctxt = ContextClient() - target_ob= ctxt.SelectOpticalBand(op_id) + target_ob = ctxt.SelectOpticalBand(op_id) return target_ob - except Exception as e : - logging.debug(f"error in finding optical band {e}") + except Exception: + MSG = 'Unable to find OpticalBand({:s}/{:s}) in Context' + LOGGER.exception(MSG.format(str(ob_index), str(op_uuid))) + return None diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py index 5a8e6821510b911cfd8e8a02c02e8f769fb39f77..f3e21c3ee7f6f868fc26e72124993cd60f99ea8e 100644 --- a/src/common/tools/kafka/Variables.py +++ b/src/common/tools/kafka/Variables.py @@ -33,9 +33,11 @@ class KafkaConfig(Enum): def get_kafka_address() -> str: kafka_server_address = get_setting('KFK_SERVER_ADDRESS', default=None) if kafka_server_address is None: - KFK_NAMESPACE = get_setting('KFK_NAMESPACE', default='kafka') + KFK_NAMESPACE = get_setting('KFK_NAMESPACE', default='kafka') KFK_PORT = get_setting('KFK_SERVER_PORT', default='9092') kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT) + LOGGER.debug('KFK_SERVER_ADDRESS not set, using default: {:s}'.format(kafka_server_address)) + LOGGER.debug('Using KFK_SERVER_ADDRESS={:s}'.format(kafka_server_address)) return kafka_server_address @staticmethod @@ -133,10 +135,3 @@ class KafkaTopic(Enum): else: LOGGER.debug('All topics created and available.') return True - - -if __name__ == '__main__': - import os - if 'KFK_SERVER_ADDRESS' not in os.environ: - os.environ['KFK_SERVER_ADDRESS'] = 'kafka-service.kafka.svc.cluster.local:9092' - KafkaTopic.create_all_topics() diff --git a/src/common/tools/object_factory/OpticalLink.py b/src/common/tools/object_factory/OpticalLink.py index e2f4f2420f10bbdc2b22d2651bc9c4aff5d43cd4..ff102d2796649b3d782c8b3d5706708c3b68c4f0 100644 --- a/src/common/tools/object_factory/OpticalLink.py +++ b/src/common/tools/object_factory/OpticalLink.py @@ -14,30 +14,28 @@ import copy -def convert_to_dict(single_val:int)->dict: - slot= dict() - bin_num = bin(single_val) - sliced_num=bin_num[2:] - for i in range(len(sliced_num)): - slot[str(i+1)]=int(sliced_num[i]) +def convert_to_dict(single_val: int, start_point: int = 0, width: int = None) -> dict: + slot = dict() + sliced_num = bin(single_val)[2:] + if width is not None: + sliced_num = sliced_num.zfill(width) + for i, bit in enumerate(sliced_num): + slot[str(start_point + i)] = int(bit) return slot -def correct_slot(dic: dict) -> dict: - _dict = copy.deepcopy(dic) - keys_list = list(_dict.keys()) - if len(keys_list) < 20: - num_keys = [int(i) for i in keys_list] - if num_keys[-1] != 20: - missed_keys = [] - diff = 20 - len(num_keys) - #print(f"diff {diff}") - for i in range(diff+1): - missed_keys.append(num_keys[-1]+i) - #print(f"missed_keys {missed_keys}") - for key in missed_keys : - _dict[key]=1 - #print(f"result {_dict}") - return _dict +def correct_slot(dic: dict, width: int = None) -> dict: + corrected = copy.deepcopy(dic) + if len(corrected) == 0: + return corrected + + normalized = {int(key): int(value) for key, value in corrected.items()} + max_slot = max(normalized.keys()) + max_range = 
width if width is not None else max_slot + 1 + + for slot_idx in range(max_range): + normalized.setdefault(slot_idx, 1) + + return {str(key): normalized[key] for key in sorted(normalized.keys())} ## To be deleted , needed now for development purpose ## diff --git a/src/common/tools/rest_api/client/RestApiClient.py b/src/common/tools/rest_api/client/RestApiClient.py index 68977a60e5f0fb3f48f111057730d043c085f023..62d30df4da8ffe2f9172d1248fcf5274790417f4 100644 --- a/src/common/tools/rest_api/client/RestApiClient.py +++ b/src/common/tools/rest_api/client/RestApiClient.py @@ -112,7 +112,10 @@ class RestApiClient: endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') request_url = TEMPLATE_URL.format(self._scheme, self._address, self._port, endpoint) - self._log_msg_request(method, request_url, body) + self._log_msg_request( + method, request_url, body, + log_level=logging.DEBUG + ) try: headers = {'accept': 'application/json'} @@ -127,7 +130,10 @@ class RestApiClient: if self._logger is not None: self._logger.exception(msg) raise Exception(msg) from e - self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) + self._log_msg_check_reply( + method, request_url, body, reply, expected_status_codes, + log_level=logging.DEBUG + ) if reply.content and len(reply.content) > 0: return reply.json() return None diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 272a29be99197572b15012ca5009b7460e9d7c54..e42e1af733b09fc1145dfb37506cf159c97c27de 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -367,16 +367,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer def DeleteOpticalChannel(self, request : OpticalConfig, context : grpc.ServicerContext) -> Empty: delete_opticalchannel(self.db_engine, self.messagebroker, request) return Empty() - + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetOpticalBand(self, request : Empty, context : grpc.ServicerContext) -> OpticalBandList: - result = get_optical_band(self.db_engine) - return OpticalBandList(opticalbands=result) - - safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + return get_optical_band(self.db_engine) + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SelectOpticalBand(self, request : OpticalBandId, context : grpc.ServicerContext) -> OpticalBand: - result = select_optical_band(self.db_engine,request ) - return result + return select_optical_band(self.db_engine, request) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetOpticalBand(self, request : OpticalBand, context : grpc.ServicerContext) -> Empty: diff --git a/src/context/service/database/OpticalBand.py b/src/context/service/database/OpticalBand.py index 6057adaadc5dcde14f9fb95f2a4b82cc51a7d04c..0e6a8adb471cb8fcda67a02fa9965d616a405da5 100644 --- a/src/context/service/database/OpticalBand.py +++ b/src/context/service/database/OpticalBand.py @@ -13,58 +13,60 @@ # limitations under the License. 
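The OpticalBand.py hunk below reworks set_optical_band() around a CockroachDB-friendly INSERT .. ON CONFLICT DO UPDATE keyed on ob_uuid. A minimal, self-contained sketch of that upsert pattern follows; the model here is a simplified stand-in for the real OpticalBandModel (only the two columns the statement touches), and the sample row is arbitrary:

from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

class Base(DeclarativeBase):
    pass

class OpticalBandModel(Base):  # simplified stand-in for the real model
    __tablename__ = 'optical_band'
    ob_uuid         : Mapped[str] = mapped_column(primary_key=True)
    connection_uuid : Mapped[str] = mapped_column(nullable=True)

ob_data = [{'ob_uuid': 'ob-1', 'connection_uuid': 'conn-1'}]

# Insert the rows; if ob_uuid already exists, refresh connection_uuid instead.
stmt = insert(OpticalBandModel).values(ob_data)
stmt = stmt.on_conflict_do_update(
    index_elements=[OpticalBandModel.ob_uuid],
    set_=dict(connection_uuid=stmt.excluded.connection_uuid),
).returning(OpticalBandModel.ob_uuid)

# Render the generated SQL to inspect the statement without a live database.
print(stmt.compile(dialect=postgresql.dialect()))

Wrapped in run_transaction(sessionmaker(bind=db_engine), callback) as in the hunk, the statement is retried automatically on CockroachDB serialization errors.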
import logging +from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine -from sqlalchemy.orm import Session, selectinload, sessionmaker +from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from sqlalchemy.dialects.postgresql import insert +from typing import Dict, List, Optional from common.method_wrappers.ServiceExceptions import NotFoundException -from typing import Dict, List -from common.proto.context_pb2 import OpticalBand,OpticalBandId,OpticalBandList +from common.proto.context_pb2 import OpticalBand, OpticalBandId, OpticalBandList from .models.OpticalConfig.OpticalBandModel import OpticalBandModel LOGGER = logging.getLogger(__name__) - -def get_optical_band(db_engine : Engine): - def callback(session:Session): - results = session.query(OpticalBandModel).all() - - return [obj.dump() for obj in results] - obj = run_transaction(sessionmaker(bind=db_engine), callback) - return obj +def get_optical_band(db_engine : Engine) -> OpticalBandList: + def callback(session : Session) -> List[Dict]: + obj_list : List[OpticalBandModel] = session.query(OpticalBandModel).all() + return [obj.dump() for obj in obj_list] + optical_bands = run_transaction(sessionmaker(bind=db_engine), callback) + return OpticalBandList(opticalbands=optical_bands) -def select_optical_band( db_engine : Engine ,request:OpticalBandId): +def select_optical_band(db_engine : Engine, request : OpticalBandId) -> OpticalBand: ob_uuid = request.opticalband_uuid.uuid - def callback(session : Session) -> OpticalBand: + def callback(session : Session) -> Optional[Dict]: stmt = session.query(OpticalBandModel) stmt = stmt.filter_by(ob_uuid=ob_uuid) - obj = stmt.first() - if obj is not None: - - return obj.dump() - return None - result= run_transaction(sessionmaker(bind=db_engine, expire_on_commit=False), callback) - if result is None : - return result - return OpticalBand(**result) - + obj = stmt.one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine, expire_on_commit=False), callback) + if obj is None: + raw_ob_uuid = request.opticalband_uuid.uuid + raise NotFoundException('OpticalBand', raw_ob_uuid, extra_details=[ + 'opticalband_uuid generated was: {:s}'.format(ob_uuid) + ]) + return OpticalBand(**obj) + + +def set_optical_band(db_engine : Engine, ob_data : List[Dict]) -> Dict: + LOGGER.debug('[set_optical_band] ob_data={:s}'.format(str(ob_data))) + + def callback(session : Session) -> Optional[str]: + if len(ob_data) == 0: return None + + stmt = insert(OpticalBandModel).values(ob_data) + stmt = stmt.on_conflict_do_update( + index_elements=[OpticalBandModel.ob_uuid], + set_=dict( + connection_uuid = stmt.excluded.connection_uuid + ) + ) + stmt = stmt.returning(OpticalBandModel.ob_uuid) + ob_id = session.execute(stmt).fetchone() + return ob_id -def set_optical_band(db_engine : Engine, ob_data ): - - def callback(session : Session) -> List[Dict]: - if len(ob_data) > 0: - stmt = insert(OpticalBandModel).values(ob_data) - stmt = stmt.on_conflict_do_update( - index_elements=[OpticalBandModel.ob_uuid], - set_=dict( - connection_uuid = stmt.excluded.connection_uuid - ) - ) - stmt = stmt.returning(OpticalBandModel.ob_uuid) - ob_id = session.execute(stmt).fetchone() - ob_id = run_transaction(sessionmaker(bind=db_engine), callback) - return {'ob_id': ob_id} + ob_id = run_transaction(sessionmaker(bind=db_engine), callback) + LOGGER.debug('[set_optical_band] ob_id={:s}'.format(str(ob_id))) + return {'ob_id': ob_id} diff --git 
a/src/context/service/database/OpticalConfig.py b/src/context/service/database/OpticalConfig.py index 7f6942d046e2ec477422d3450625bd370a3baf08..978947c26571bbdffa4ad3db936ea621709ebe1c 100644 --- a/src/context/service/database/OpticalConfig.py +++ b/src/context/service/database/OpticalConfig.py @@ -13,17 +13,18 @@ # limitations under the License. import json, logging ,datetime -from sqlalchemy.dialects.postgresql import insert -from common.message_broker.MessageBroker import MessageBroker -from common.DeviceTypes import DeviceTypeEnum from sqlalchemy import inspect +from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction +from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2 import OpticalConfig, OpticalConfigId, Empty, EventTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.DeviceTypes import DeviceTypeEnum from .models.OpticalConfig.OpticalConfigModel import OpticalConfigModel -from .models.OpticalConfig.TransponderModel import TransponderTypeModel, OpticalChannelModel, TransponderInterfaceModel from .models.OpticalConfig.RoadmModel import RoadmTypeModel, ChannelModel, ORInterfaceModel +from .models.OpticalConfig.TransponderModel import TransponderTypeModel, OpticalChannelModel, TransponderInterfaceModel from context.service.database.uuids.OpticalConfig import ( @@ -33,8 +34,8 @@ from context.service.database.uuids.OpticalConfig import ( ) from .Events import notify_event_opticalconfig from .OpticalBand import set_optical_band + LOGGER = logging.getLogger(__name__) -now = datetime.datetime.utcnow() def get_opticalconfig(db_engine : Engine): def callback(session:Session): @@ -343,6 +344,8 @@ def set_opticalconfig(db_engine : Engine, request : OpticalConfig): return {'opticalconfig_uuid': opticalconfig_id} def update_opticalconfig(db_engine : Engine, request : OpticalConfig): + LOGGER.debug('[update_opticalconfig] received request: {:s}'.format(grpc_message_to_json_string(request))) + opticalconfig_id = OpticalConfigId() device_id = request.device_id device_uuid = request.device_id.device_uuid.uuid @@ -357,8 +360,11 @@ def update_opticalconfig(db_engine : Engine, request : OpticalConfig): #is_transpondre = False opticalconfig_uuid = opticalconfig_get_uuid(device_id) is_optical_band=None + + LOGGER.debug('[update_opticalconfig] request.config={:s}'.format(str(request.config))) if request.config : config = json.loads(request.config) + LOGGER.debug('[update_opticalconfig] config={:s}'.format(str(config))) if 'new_config' in config: if 'type' in config: @@ -368,7 +374,7 @@ def update_opticalconfig(db_engine : Engine, request : OpticalConfig): if 'channel_namespace' in config['new_config']: channel_namespace = config['new_config'] ['channel_namespace'] - if config_type == DeviceTypeEnum.OPTICAL_TRANSPONDER._value_: + if config_type == DeviceTypeEnum.OPTICAL_TRANSPONDER.value: is_transpondre = True transceivers = [] if channel_namespace is None and 'channel_namespace' in config: @@ -449,10 +455,16 @@ def update_opticalconfig(db_engine : Engine, request : OpticalConfig): "opticalconfig_uuid": opticalconfig_uuid, }) - if config_type == DeviceTypeEnum.OPTICAL_ROADM._value_: + if config_type == DeviceTypeEnum.OPTICAL_ROADM.value: + MSG = '[update_opticalconfig] config_type == DeviceTypeEnum.OPTICAL_ROADM.value; config_type={:s}' + LOGGER.debug(MSG.format(str(config_type))) + if channel_namespace is 
None and 'channel_namespace' in config['new_config']: channel_namespace=config['new_config']['channel_namespace'] + if 'is_opticalband' in config and not config['is_opticalband']: + MSG = '[update_opticalconfig] is_opticalband in config and not config[is_opticalband]; config={:s}' + LOGGER.debug(MSG.format(str(config))) is_optical_band=config['is_opticalband'] bidir = config['new_config']['bidir'] #channels = [channel['name']['index'] for channel in config['channels']] @@ -479,7 +491,9 @@ def update_opticalconfig(db_engine : Engine, request : OpticalConfig): }) if not bidir: break if 'is_opticalband' in config and config['is_opticalband']: - is_optical_band=config['is_opticalband'] + MSG = '[update_opticalconfig] is_opticalband in config and config[is_opticalband]; config={:s}' + LOGGER.debug(MSG.format(str(config))) + is_optical_band = config['is_opticalband'] #channels = [channel['name']['index'] for channel in config['channels']] if 'flow_handled' in config and len(config['flow_handled']) > 0: @@ -502,6 +516,7 @@ def update_opticalconfig(db_engine : Engine, request : OpticalConfig): "type" : 'optical_band', "channel_index" : str( channel_index) if channel_index is not None else None }) + now = datetime.datetime.utcnow() optical_bands.append ({ "channel_uuid" : channel_get_uuid(f'optical_bands_{channel_index}',device_uuid), 'connection_uuid' : config['connection_uuid'], @@ -601,10 +616,11 @@ def update_opticalconfig(db_engine : Engine, request : OpticalConfig): ) stmt = stmt.returning(ChannelModel.channel_uuid) opticalChannel_id = session.execute(stmt).fetchone() - + opticalconfig_id = run_transaction(sessionmaker(bind=db_engine), callback) - if is_optical_band: set_optical_band(db_engine,optical_bands) + LOGGER.debug('[update_opticalconfig] is_optical_band={:s}'.format(str(is_optical_band))) + if is_optical_band: set_optical_band(db_engine,optical_bands) return {'opticalconfig_uuid': opticalconfig_id} def select_opticalconfig(db_engine : Engine, request : OpticalConfigId): diff --git a/src/context/service/database/models/Slot.py b/src/context/service/database/models/Slot.py index 6733642769b5d4c1ed8692fbb86a70095c76c8f5..e6592621646b1023d78ed28ece0818ef83a5b96e 100644 --- a/src/context/service/database/models/Slot.py +++ b/src/context/service/database/models/Slot.py @@ -68,15 +68,15 @@ class SlotType(TypeDecorator): class C_Slot(SlotType): - start_point = 1 + start_point = 0 width = 320 class L_Slot(SlotType): - start_point = 101 + start_point = 0 width = 550 class S_Slot(SlotType): - start_point = 501 + start_point = 0 width = 720 diff --git a/src/context/tests/test_optical_link_slots.py b/src/context/tests/test_optical_link_slots.py index 17f92c2bb8c18c3c26fb3194abdd1cb1219b9744..f7f406fe67d13c7d21b2e65a3ed4052832a1c6ef 100644 --- a/src/context/tests/test_optical_link_slots.py +++ b/src/context/tests/test_optical_link_slots.py @@ -44,28 +44,30 @@ def build_sparse_slot_input(active_slots): @pytest.mark.parametrize( - 'slot_type,width,active_slots', + 'slot_type,active_slots', [ - (C_Slot(), 320, [1, 18, 320]), - (L_Slot(), 550, [101, 202, 650]), - (S_Slot(), 720, [501, 706, 1220]), + (C_Slot(), [0, 17, 319]), + (L_Slot(), [0, 101, 549]), + (S_Slot(), [0, 205, 719]), ], ) -def test_slot_type_roundtrip_preserves_positions(slot_type, width, active_slots) -> None: +def test_slot_type_roundtrip_preserves_positions(slot_type, active_slots) -> None: sparse_input = build_sparse_slot_input(active_slots) encoded = slot_type.process_bind_param(sparse_input, dialect=None) decoded = 
slot_type.process_result_value(encoded, dialect=None) assert encoded is not None - assert decoded == build_expected_slot_map(slot_type.start_point, width, active_slots) + assert decoded == build_expected_slot_map( + slot_type.start_point, slot_type.width, active_slots + ) @pytest.mark.parametrize( 'slot_type,invalid_key', [ - (C_Slot(), 321), - (L_Slot(), 651), - (S_Slot(), 1221), + (C_Slot(), 320), + (L_Slot(), 550), + (S_Slot(), 720), ], ) def test_slot_type_rejects_out_of_range_keys(slot_type, invalid_key: int) -> None: @@ -76,9 +78,9 @@ def test_slot_type_rejects_out_of_range_keys(slot_type, invalid_key: int) -> Non def _run_slot_smoke_test(engine: sqlalchemy.engine.Engine) -> None: Base.metadata.create_all(engine) try: - c_slots = build_sparse_slot_input([1, 11, 320]) - l_slots = build_sparse_slot_input([101, 113, 650]) - s_slots = build_sparse_slot_input([501, 515, 1220]) + c_slots = build_sparse_slot_input([0, 10, 319]) + l_slots = build_sparse_slot_input([0, 12, 549]) + s_slots = build_sparse_slot_input([0, 14, 719]) with Session(engine) as session: session.add(SlotSmokeModel(id=1, c_slots=c_slots, l_slots=l_slots, s_slots=s_slots)) @@ -86,9 +88,9 @@ def _run_slot_smoke_test(engine: sqlalchemy.engine.Engine) -> None: with Session(engine) as session: stored = session.query(SlotSmokeModel).filter_by(id=1).one() - assert stored.c_slots == build_expected_slot_map(1, 320, [1, 11, 320]) - assert stored.l_slots == build_expected_slot_map(101, 550, [101, 113, 650]) - assert stored.s_slots == build_expected_slot_map(501, 720, [501, 515, 1220]) + assert stored.c_slots == build_expected_slot_map(0, 320, [0, 10, 319]) + assert stored.l_slots == build_expected_slot_map(0, 550, [0, 12, 549]) + assert stored.s_slots == build_expected_slot_map(0, 720, [0, 14, 719]) finally: Base.metadata.drop_all(engine) @@ -99,9 +101,7 @@ def test_slot_smoke_sqlite() -> None: def test_slot_smoke_cockroachdb() -> None: - crdb_uri = os.environ.get('CRDB_URI') - if crdb_uri is None: - pytest.skip('CRDB_URI is not set') + crdb_uri = os.environ['CRDB_URI'] engine = sqlalchemy.create_engine( crdb_uri, connect_args={'application_name': 'tfs-slot-smoketest'}, future=True ) diff --git a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py index 1b906b82000f5f8ab421e2bb34fa6686468a8f5d..2bee999c6371393e5a61da9609999f817af7e0ba 100644 --- a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
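The ietf_l2vpn hunk below, and the matching ietf_l3vpn and ietf_slice hunks after it, all add the same endpoint-settings lookup: /endpoints/endpoint[...] config rules are indexed under both the endpoint UUID and the endpoint name, and the UUID match is tried first. A self-contained sketch of that indexing, with a hypothetical json_device payload:

import json
from typing import Dict

def index_endpoint_settings(json_device : Dict) -> Dict[str, Dict]:
    config_rules = json_device.get('device_config', {}).get('config_rules', [])
    indexed : Dict[str, Dict] = {}
    for cr in config_rules:
        if cr.get('action') != 'CONFIGACTION_SET': continue
        if 'custom' not in cr: continue
        if not cr['custom']['resource_key'].startswith('/endpoints/endpoint['): continue
        settings = json.loads(cr['custom']['resource_value'])
        # Index the same settings under both keys so callers can resolve either.
        ep_uuid = settings.get('uuid')
        if ep_uuid is not None: indexed[ep_uuid] = settings
        ep_name = settings.get('name')
        if ep_name is not None: indexed[ep_name] = settings
    return indexed

json_device = {'device_config': {'config_rules': [{
    'action': 'CONFIGACTION_SET',
    'custom': {
        'resource_key'  : '/endpoints/endpoint[eth0]',
        'resource_value': json.dumps({'uuid': 'ep-uuid-1', 'name': 'eth0', 'mtu': 1500}),
    },
}]}}

indexed = index_endpoint_settings(json_device)
# UUID lookup first, then fall back to the endpoint name, as in the drivers.
settings = indexed.get('ep-uuid-1') or indexed.get('eth0')
print(settings)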
-import logging, requests +import json, logging, requests from typing import Dict, List, Optional from common.tools.rest_api.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum + GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' GET_DEVICES_URL = '/tfs-api/devices' GET_LINKS_URL = '/tfs-api/links' @@ -52,8 +53,10 @@ MAPPING_DRIVER = { 'DEVICEDRIVER_RESTCONF_OPENCONFIG' : 21, } + LOGGER = logging.getLogger(__name__) + class TfsApiClient(RestApiClient): def __init__( self, address : str, port : int, scheme : str = 'http', @@ -65,9 +68,26 @@ class TfsApiClient(RestApiClient): timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER ) - def check_credentials(self) -> None: - self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) - LOGGER.info('Credentials checked') + + def check_credentials(self, raise_if_fail : bool = True) -> bool: + try: + LOGGER.info('Checking credentials...') + self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) + LOGGER.info('Credentials checked') + return True + except requests.exceptions.Timeout as e: + MSG = 'Timeout connecting {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + except Exception as e: + MSG = 'Exception checking credentials: {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + def get_devices_endpoints( self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES @@ -88,6 +108,10 @@ class TfsApiClient(RestApiClient): device_type : str = json_device['device_type'] #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type device_status = json_device['device_operational_status'] + + ctrl_id : Dict[str, Dict] = json_device.get('controller_id', dict()) + ctrl_uuid : Optional[str] = ctrl_id.get('device_uuid', dict()).get('uuid') + device_url = '/devices/device[{:s}]'.format(device_uuid) device_data = { 'uuid': json_device['device_id']['device_uuid']['uuid'], @@ -99,17 +123,46 @@ class TfsApiClient(RestApiClient): for driver in json_device['device_drivers'] ], } + if ctrl_uuid is not None and len(ctrl_uuid) > 0: + device_data['ctrl_uuid'] = ctrl_uuid result.append((device_url, device_data)) + config_rule_list : List[Dict] = ( + json_device + .get('device_config', dict()) + .get('config_rules', list()) + ) + config_rule_dict : Dict[str, Dict] = dict() + for cr in config_rule_list: + if cr['action'] != 'CONFIGACTION_SET': continue + if 'custom' not in cr: continue + cr_rk : str = cr['custom']['resource_key'] + if not cr_rk.startswith('/endpoints/endpoint['): continue + settings = json.loads(cr['custom']['resource_value']) + ep_uuid = settings.get('uuid') + if ep_uuid is not None: + config_rule_dict[ep_uuid] = settings + ep_name = settings.get('name') + if ep_name is not None: + config_rule_dict[ep_name] = settings + for json_endpoint in json_device['device_endpoints']: endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] + endpoint_name = json_endpoint['name'] endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid) endpoint_data = { 'device_uuid': device_uuid, 'uuid': endpoint_uuid, - 'name': json_endpoint['name'], + 'name': endpoint_name, 'type': json_endpoint['endpoint_type'], } + endpoint_settings = config_rule_dict.get(endpoint_uuid) + if endpoint_settings is not None: + endpoint_data['settings'] = endpoint_settings + 
else: + endpoint_settings = config_rule_dict.get(endpoint_name) + if endpoint_settings is not None: + endpoint_data['settings'] = endpoint_settings result.append((endpoint_url, endpoint_data)) if import_topology == ImportTopologyEnum.DEVICES: diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index c984c1adf2200b6150a5b59e416c85bf0ec7cdb3..6eea8d6bfbd323e95fddaa14410f32a35e5c7022 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -137,15 +137,19 @@ class TfsApiClient(RestApiClient): .get('device_config', dict()) .get('config_rules', list()) ) - config_rule_dict = dict() + config_rule_dict : Dict[str, Dict] = dict() for cr in config_rule_list: if cr['action'] != 'CONFIGACTION_SET': continue if 'custom' not in cr: continue cr_rk : str = cr['custom']['resource_key'] if not cr_rk.startswith('/endpoints/endpoint['): continue settings = json.loads(cr['custom']['resource_value']) - ep_name = settings['name'] - config_rule_dict[ep_name] = settings + ep_uuid = settings.get('uuid') + if ep_uuid is not None: + config_rule_dict[ep_uuid] = settings + ep_name = settings.get('name') + if ep_name is not None: + config_rule_dict[ep_name] = settings for json_endpoint in json_device['device_endpoints']: endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] @@ -157,9 +161,13 @@ class TfsApiClient(RestApiClient): 'name': endpoint_name, 'type': json_endpoint['endpoint_type'], } - endpoint_settings = config_rule_dict.get(endpoint_name) + endpoint_settings = config_rule_dict.get(endpoint_uuid) if endpoint_settings is not None: endpoint_data['settings'] = endpoint_settings + else: + endpoint_settings = config_rule_dict.get(endpoint_name) + if endpoint_settings is not None: + endpoint_data['settings'] = endpoint_settings result.append((endpoint_url, endpoint_data)) if import_topology == ImportTopologyEnum.DEVICES: diff --git a/src/device/service/drivers/ietf_slice/TfsApiClient.py b/src/device/service/drivers/ietf_slice/TfsApiClient.py index 0388e91057ad7d65631f66d495112de03f02b72d..8770af2c50af76b8634e7ccf3b3ef3beab147b24 100644 --- a/src/device/service/drivers/ietf_slice/TfsApiClient.py +++ b/src/device/service/drivers/ietf_slice/TfsApiClient.py @@ -138,15 +138,19 @@ class TfsApiClient(RestApiClient): .get('device_config', dict()) .get('config_rules', list()) ) - config_rule_dict = dict() + config_rule_dict : Dict[str, Dict] = dict() for cr in config_rule_list: if cr['action'] != 'CONFIGACTION_SET': continue if 'custom' not in cr: continue cr_rk : str = cr['custom']['resource_key'] if not cr_rk.startswith('/endpoints/endpoint['): continue settings = json.loads(cr['custom']['resource_value']) - ep_name = settings['name'] - config_rule_dict[ep_name] = settings + ep_uuid = settings.get('uuid') + if ep_uuid is not None: + config_rule_dict[ep_uuid] = settings + ep_name = settings.get('name') + if ep_name is not None: + config_rule_dict[ep_name] = settings for json_endpoint in json_device['device_endpoints']: endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] @@ -158,9 +162,13 @@ class TfsApiClient(RestApiClient): 'name': endpoint_name, 'type': json_endpoint['endpoint_type'], } - endpoint_settings = config_rule_dict.get(endpoint_name) + endpoint_settings = config_rule_dict.get(endpoint_uuid) if endpoint_settings is not None: endpoint_data['settings'] = endpoint_settings + else: + endpoint_settings = config_rule_dict.get(endpoint_name) + 
if endpoint_settings is not None: + endpoint_data['settings'] = endpoint_settings result.append((endpoint_url, endpoint_data)) if import_topology == ImportTopologyEnum.DEVICES: diff --git a/src/device/service/drivers/optical_tfs/TfsApiClient.py b/src/device/service/drivers/optical_tfs/TfsApiClient.py index 59126c7b1734a3cc298c26cfdebc5aaa904e02b9..ce8f00df19672b7dee580062b2832467cbdf9e5f 100644 --- a/src/device/service/drivers/optical_tfs/TfsApiClient.py +++ b/src/device/service/drivers/optical_tfs/TfsApiClient.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import logging, requests from typing import Dict, List, Optional, Tuple from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum @@ -24,11 +24,13 @@ from common.tools.object_factory.EndPoint import json_endpoint_id from common.tools.object_factory.Service import json_service from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum -CONTEXT_IDS_URL = '/tfs-api/context_ids' + +GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' TOPOLOGY_URL = '/tfs-api/context/{context_uuid:s}/topology_details/{topology_uuid:s}' SERVICES_URL = '/tfs-api/context/{context_uuid:s}/services' SERVICE_URL = '/tfs-api/context/{context_uuid:s}/service/{service_uuid:s}' + MAPPING_STATUS = { 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, 'DEVICEOPERATIONALSTATUS_DISABLED' : 1, @@ -60,8 +62,10 @@ MAPPING_DRIVER = { 'DEVICEDRIVER_RESTCONF_OPENCONFIG' : 21, } + LOGGER = logging.getLogger(__name__) + class TfsApiClient(RestApiClient): def __init__( self, address : str, port : int, scheme : str = 'http', @@ -73,9 +77,26 @@ class TfsApiClient(RestApiClient): timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER ) - def check_credentials(self) -> None: - self.get(CONTEXT_IDS_URL) - LOGGER.info('Credentials checked') + + def check_credentials(self, raise_if_fail : bool = True) -> bool: + try: + LOGGER.info('Checking credentials...') + self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) + LOGGER.info('Credentials checked') + return True + except requests.exceptions.Timeout as e: + MSG = 'Timeout connecting {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + except Exception as e: + MSG = 'Exception checking credentials: {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + def get_devices_endpoints( self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES @@ -113,11 +134,12 @@ class TfsApiClient(RestApiClient): for json_endpoint in json_device['device_endpoints']: endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] + endpoint_name = json_endpoint['name'] endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid) endpoint_data = { 'device_uuid': device_uuid, 'uuid': endpoint_uuid, - 'name': json_endpoint['name'], + 'name': endpoint_name, 'type': json_endpoint['endpoint_type'], } result.append((endpoint_url, endpoint_data)) diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Tools.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Tools.py index bb86ff222dd3152bfe45c2111ce4dd311048579b..db5ccbe2e7940bafeca1db16d3e00c48f255b015 100644 --- 
a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Tools.py +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Tools.py @@ -151,6 +151,7 @@ def compose_optical_service(vlink_request : Dict) -> Dict: LOGGER.info('[compose_optical_service] optical_border_endpoint_ids={:s}'.format(str(optical_border_endpoint_ids))) constraints = [ + json_constraint_custom('type', 'multi_granular'), json_constraint_custom('bandwidth[gbps]', str(vlink_request['attributes']['total_capacity_gbps'])), json_constraint_custom('bidirectionality', '1'), ] @@ -159,6 +160,8 @@ def compose_optical_service(vlink_request : Dict) -> Dict: if vlink_service_uuid == 'IP1/PORT-xe1==IP2/PORT-xe1': constraints.append(json_constraint_custom('optical-band-width[GHz]', '300')) + else: + constraints.append(json_constraint_custom('optical_band_id', '1')) vlink_optical_service = json_service( vlink_service_uuid, diff --git a/src/nbi/service/vntm_recommend/Namespaces.py b/src/nbi/service/vntm_recommend/Namespaces.py index 9a2fdcf3590e20a13fe192e5cc618d3c2e928a97..9af29a4361bc9674d6aeb1d081e79cb5105838c7 100644 --- a/src/nbi/service/vntm_recommend/Namespaces.py +++ b/src/nbi/service/vntm_recommend/Namespaces.py @@ -45,32 +45,42 @@ class VntRecommServerNamespace(Namespace): LOGGER.debug(MSG.format(str(request.sid), str(reason))) leave_room(SIO_ROOM, namespace=SIO_NAMESPACE) - def on_vlink_created(self, data): - MSG = '[on_vlink_created] begin: sid={:s}, data={:s}' - LOGGER.debug(MSG.format(str(request.sid), str(data))) + @staticmethod + def _parse_payload(data): + if isinstance(data, str): + return json.loads(data) + if isinstance(data, dict): + return dict(data) + raise TypeError('Unsupported recommendation callback payload type: {:s}'.format(type(data).__name__)) + + def _publish_reply(self, event_name: str, data) -> None: + sid = getattr(request, 'sid', '') + LOGGER.info('[%s] begin: sid=%s payload=%s', event_name, sid, str(data)) - data = json.loads(data) - request_key = str(data.pop('_request_key')).encode('utf-8') - vntm_reply = json.dumps({'event': 'vlink_created', 'data': data}).encode('utf-8') - LOGGER.debug('[on_vlink_created] request_key={:s}/{:s}'.format(str(type(request_key)), str(request_key))) - LOGGER.debug('[on_vlink_created] vntm_reply={:s}/{:s}'.format(str(type(vntm_reply)), str(vntm_reply))) + json_data = self._parse_payload(data) + request_key = str(json_data.pop('_request_key')).encode('utf-8') + vntm_reply = json.dumps({'event': event_name, 'data': json_data}).encode('utf-8') + LOGGER.info( + '[%s] Publishing Kafka reply: request_key=%s payload=%s', + event_name, request_key.decode('utf-8'), vntm_reply.decode('utf-8') + ) self.kafka_producer.send( KafkaTopic.VNTMANAGER_RESPONSE.value, key=request_key, value=vntm_reply ) self.kafka_producer.flush() + LOGGER.info('[%s] Kafka reply published', event_name) - def on_vlink_removed(self, data): - MSG = '[on_vlink_removed] begin: sid={:s}, data={:s}' - LOGGER.debug(MSG.format(str(request.sid), str(data))) - - data = json.loads(data) - request_key = str(data.pop('_request_key')).encode('utf-8') - vntm_reply = json.dumps({'event': 'vlink_removed', 'data': data}).encode('utf-8') - LOGGER.debug('[on_vlink_removed] request_key={:s}/{:s}'.format(str(type(request_key)), str(request_key))) - LOGGER.debug('[on_vlink_removed] vntm_reply={:s}/{:s}'.format(str(type(vntm_reply)), str(vntm_reply))) + def on_vlink_created(self, data): + try: + self._publish_reply('vlink_created', data) + except Exception: + LOGGER.exception('[on_vlink_created] 
Failed to process callback') + raise - self.kafka_producer.send( - KafkaTopic.VNTMANAGER_RESPONSE.value, key=request_key, value=vntm_reply - ) - self.kafka_producer.flush() + def on_vlink_removed(self, data): + try: + self._publish_reply('vlink_removed', data) + except Exception: + LOGGER.exception('[on_vlink_removed] Failed to process callback') + raise diff --git a/src/opticalcontroller/OpticalController.py b/src/opticalcontroller/OpticalController.py index 90c2c246cd8d873dd6f637ab3f220eef25532e5d..59925639b4b9f78a0ac0f5566faf0c81c9cb10db 100644 --- a/src/opticalcontroller/OpticalController.py +++ b/src/opticalcontroller/OpticalController.py @@ -442,7 +442,7 @@ class GetTopology(Resource): continue dev_dic = { - "id":device.device_id.device_uuid.uuid, + "id": device.device_id.device_uuid.uuid, #"ip":f"10.30.2.{207+i}", #"port":"50001", "type": dev_type, diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index dc008725bdd2271551c4fda17c6c2275a87686a5..7aa170736ebdf2629e99a1c67b36e925a4501141 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -17,12 +17,14 @@ from opticalcontroller.dijkstra import * from opticalcontroller.tools import * from opticalcontroller.variables import * -''' + +logging.basicConfig(level=logging.DEBUG) LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) def print(*args) -> None: LOGGER.info(' '.join([str(a) for a in args])) -''' + class RSA(): def __init__(self, nodes, links): @@ -67,28 +69,32 @@ class RSA(): return "{},{},{}".format(self.c_slot_number, self.l_slot_number, self.s_slot_number) def init_link_slots2(self): + def initialize_band_slots(fib: dict, band_name: str, width: int) -> int: + band_slots = fib.get(band_name) + if not band_slots: + return 0 + + fib[band_name] = {str(slot_index): 1 for slot_index in range(width)} + return width + if full_links: print("2026 initialize full spectrum") for l in self.links_dict["optical_links"]: fib = l["optical_details"] - #fib = self.links_dict[l]["fibers"][f] - if len(fib["c_slots"]) > 0: - for c in range(0, Nc): - fib["c_slots"][str(c)] = 1 - if len(fib["l_slots"]) > 0: - for c in range(0, Nl): - fib["l_slots"][str(c)] = 1 - if len(fib["s_slots"]) > 0: - for c in range(0, Ns): - fib["s_slots"][str(c)] = 1 - if debug: - print(fib) - for l1 in self.links_dict["optical_links"]: - fib1 = l1["optical_details"] - self.c_slot_number = len(fib1["c_slots"].keys()) - self.l_slot_number = len(fib1["l_slots"].keys()) - self.s_slot_number = len(fib1["s_slots"].keys()) - break + + self.c_slot_number = initialize_band_slots(fib, "c_slots", Nc) + self.l_slot_number = initialize_band_slots(fib, "l_slots", Nl) + self.s_slot_number = initialize_band_slots(fib, "s_slots", Ns) + #if debug: + # print(fib) + + if self.c_slot_number == 0 and self.l_slot_number == 0 and self.s_slot_number == 0: + for l1 in self.links_dict["optical_links"]: + fib1 = l1["optical_details"] + self.c_slot_number = len(fib1.get("c_slots", {}).keys()) + self.l_slot_number = len(fib1.get("l_slots", {}).keys()) + self.s_slot_number = len(fib1.get("s_slots", {}).keys()) + break return "{},{},{}".format(self.c_slot_number, self.l_slot_number, self.s_slot_number) def initGraph(self): @@ -96,8 +102,8 @@ class RSA(): for n in self.nodes_dict: self.g.add_vertex(n) for l in self.links_dict["optical_links"]: - if debug: - print(l) + #if debug: + # print(l) [s, d] = l["optical_link"]["name"].split('-') ps = l["optical_link"]["details"]["source"] pd = l["optical_link"]["details"]["target"] @@ -114,8 +120,8 @@ 
class RSA(): for n in self.nodes_dict: self.g.add_vertex(n) for l in self.links_dict["optical_links"]: - if debug: - print(l) + #if debug: + # print(l) [s, d] = l["name"].split('-') ps = l["optical_details"]["src_port"] pd = l["optical_details"]["dst_port"] @@ -133,21 +139,21 @@ class RSA(): links = [] for i in range(0, len(path) - 1): s = path[i] - if debug: - print(s) + #if debug: + # print(s) if i < len(path) - 1: d = path[i + 1] link_id = "{}-{}".format(s, d) - if debug: - #print(link_id, self.links_dict[link_id]) - print(link_id, self.get_link_by_name(link_id)) + #if debug: + # #print(link_id, self.links_dict[link_id]) + # print(link_id, self.get_link_by_name(link_id)) links.append(link_id) self.g.reset_graph() return links, path def compute_disjoint_path(self, src, dst, path1=None): - if path1 == None: + if path1 is None: path1 = shortest_path(self.g, self.g.get_vertex(src), self.g.get_vertex(dst)) path = disjoint_path(self.g, src, dst, path1, False) print("INFO: Path from {} to {} with distance: {}".format(src, dst, self.g.get_vertex(dst).get_distance())) @@ -156,14 +162,14 @@ class RSA(): links = [] for i in range(0, len(path) - 1): s = path[i] - if debug: - print(s) + #if debug: + # print(s) if i < len(path) - 1: d = path[i + 1] link_id = "{}-{}".format(s, d) - if debug: - #print(link_id, self.links_dict[link_id]) - print(link_id, self.get_link_by_name(link_id)) + #if debug: + # #print(link_id, self.links_dict[link_id]) + # print(link_id, self.get_link_by_name(link_id)) links.append(link_id) self.g.reset_graph() @@ -355,12 +361,12 @@ class RSA(): #self.optical_bands[optical_band_id][band].sort() def restore_optical_band_2(self, optical_band_id, slots, band ,links): - print(f"example of band { band}") - print(f"example of slots {slots}") - print(f"example of self.optical_bands_before { self.optical_bands}") + #print(f"example of band { band}") + #print(f"example of slots {slots}") + #print(f"example of self.optical_bands_before { self.optical_bands}") for i in slots: self.optical_bands[optical_band_id][band][str(i)] = 1 - print(f"example of self.optical_bands_after { self.optical_bands}") + #print(f"example of self.optical_bands_after { self.optical_bands}") #link_name= self.optical_bands[optical_band_id]['links'][0] #link = self.get_link_by_name(link_name) @@ -531,7 +537,7 @@ class RSA(): ''' for l in links: for link in self.links_dict["optical_links"]: - print(f"tracking link info {link}") + #print(f"tracking link info {link}") if link["name"] == l: fib = link["optical_details"] #for f in self.links_dict[l]['fibers'].keys(): @@ -560,16 +566,16 @@ class RSA(): def get_link_by_name (self, key): for link in self.links_dict["optical_links"]: if link["name"] == key: - if debug: - print(link) + #if debug: + # print(link) break return link def get_fiber_details(self, link_key, fiber_id): for link in self.links_dict["optical_links"]: if link["name"] == link_key: - if debug: - print(link) + #if debug: + # print(link) for fib in link["optical_details"]: if fib["ID"] == fiber_id: return fib @@ -595,8 +601,8 @@ class RSA(): r_l = reverse_link(l) r_link = self.get_link_by_name(r_l) - if debug: - print(r_l) + #if debug: + # print(r_l) #for f in r_link["fibers"].keys(): r_fib = r_link["optical_details"] @@ -1262,7 +1268,7 @@ class RSA(): if len(existing_ob) > 0: #first checking if provided band id is passed - if preferred is not None: + if preferred is not None and preferred != "ANY": ob_id = int(preferred) if "is_active" in self.optical_bands[ob_id].keys(): is_active = 
self.optical_bands[ob_id]["is_active"] diff --git a/src/opticalcontroller/tools.py b/src/opticalcontroller/tools.py index dfca580f6cfe80646fa5de2c9388e499a5353029..98b7307835a1eb6c205c8366d32c6dc225201543 100644 --- a/src/opticalcontroller/tools.py +++ b/src/opticalcontroller/tools.py @@ -149,27 +149,36 @@ def get_slot_frequency(b, n): def get_side_slots_on_link(link, val, old_slots): - #link = l["optical_details"][band] - x = list(old_slots.keys()) - y = list(link.keys()) - keys = str_list_to_int(x) - keys.sort() - #print("AAAA") - #print(link, val, old_slots, keys) - #print(x) - starting_slot = keys[-1] + current_slots = sorted(str_list_to_int(list(old_slots.keys()))) if isinstance(old_slots, dict) else sorted([ + int(slot) for slot in old_slots + ]) + available_slots = sorted(str_list_to_int(list(link.keys()))) + if len(current_slots) == 0 or len(available_slots) == 0: + return [], 0 + + starting_slot = current_slots[-1] + 1 num = 0 res = [] - #print(starting_slot) - for slot_id in range(starting_slot, len(y)): - if link[y[slot_id]] == 1: + + for slot_id in available_slots: + if slot_id < starting_slot: + continue + + expected_slot = starting_slot + num + if slot_id != expected_slot: + return res, 0 + + if link[str(slot_id)] == 1: num += 1 - res.append(int(y[slot_id])) + res.append(slot_id) else: return res, 0 - if num == val or slot_id == len(y) - 1: + + if num == val or slot_id == available_slots[-1]: return res, num + return res, 0 + def frequency_converter(b, slots): l = len(slots) @@ -305,7 +314,8 @@ def update_optical_band (optical_bands,optical_band_id,band,link): key_list = optical_bands[optical_band_id][band].keys() corrected_slots=optical_bands[optical_band_id][band] if (len(key_list) < 20): - corrected_slots=correct_slot(optical_bands[optical_band_id][band]) + band_width = Nc if band == "c_slots" else Nl if band == "l_slots" else Ns + corrected_slots=correct_slot(optical_bands[optical_band_id][band], width=band_width) fib={} fib['c_slots']=link['optical_details']['c_slots'] @@ -362,4 +372,3 @@ def set_link_update (fib:dict,link:dict,test="updating"): print (f"setOpticalLink {err}") - diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index 121f0842770b5e455ad683c35a4d588ded0b7387..b639d1bdc0b4451921a76df160d031b895d7ac90 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -336,8 +336,17 @@ class _Algorithm: ] self.logger.debug('path_hops = {:s}'.format(str(path_hops))) - device_types = {v[0]['device_type'] for k,v in self.device_dict.items()} - DEVICES_BASIC_CONNECTION = { + #device_types = { + # v[0]['device_type'] + # for k,v in self.device_dict.items() + #} + device_types = { + self.device_dict[path_hop['device']][0]['device_type'] + for path_hop in path_hops + } + self.logger.debug('device_types = {:s}'.format(str(device_types))) + + DEVICES_BASIC_PACKET_CONNECTION = { DeviceTypeEnum.EMULATED_CLIENT.value, DeviceTypeEnum.EMULATED_COMPUTER.value, DeviceTypeEnum.EMULATED_DATACENTER.value, @@ -346,10 +355,23 @@ class _Algorithm: DeviceTypeEnum.PACKET_POP.value, DeviceTypeEnum.PACKET_ROUTER.value, } - self.logger.debug('device_types = {:s}'.format(str(device_types))) - self.logger.debug('DEVICES_BASIC_CONNECTION = {:s}'.format(str(DEVICES_BASIC_CONNECTION))) - is_basic_connection = device_types.issubset(DEVICES_BASIC_CONNECTION) + self.logger.debug('DEVICES_BASIC_PACKET_CONNECTION = {:s}'.format(str(DEVICES_BASIC_PACKET_CONNECTION))) + 
is_basic_packet_connection = device_types.issubset(DEVICES_BASIC_PACKET_CONNECTION) + self.logger.debug('is_basic_packet_connection = {:s}'.format(str(is_basic_packet_connection))) + + DEVICES_BASIC_OPTICAL_CONNECTION = { + DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value, + DeviceTypeEnum.OPTICAL_ROADM.value, + DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value, + DeviceTypeEnum.OPTICAL_TRANSPONDER.value, + } + self.logger.debug('DEVICES_BASIC_OPTICAL_CONNECTION = {:s}'.format(str(DEVICES_BASIC_OPTICAL_CONNECTION))) + is_basic_optical_connection = device_types.issubset(DEVICES_BASIC_OPTICAL_CONNECTION) + self.logger.debug('is_basic_optical_connection = {:s}'.format(str(is_basic_optical_connection))) + + is_basic_connection = is_basic_packet_connection or is_basic_optical_connection self.logger.debug('is_basic_connection = {:s}'.format(str(is_basic_connection))) + if is_basic_connection: self.logger.info('Assuming basic connections...') connections = convert_explicit_path_hops_to_plain_connection( diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py index 3983586407a374797254dae0dc23802a05f2864b..7f78a7e32a05f4897149b90b0547f73fae8ce86a 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py @@ -46,6 +46,7 @@ OPTICAL_DEVICE_TYPES = { SERVICE_TYPE_L2NM = {ServiceTypeEnum.SERVICETYPE_L2NM} SERVICE_TYPE_L3NM = {ServiceTypeEnum.SERVICETYPE_L3NM} SERVICE_TYPE_LXNM = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM} +SERVICE_TYPE_OPTICAL = {ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY} SERVICE_TYPE_TAPI = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE} SERVICE_TYPE_IP_LINK = {ServiceTypeEnum.SERVICETYPE_IP_LINK} SERVICE_TYPE_IPOWDM = {ServiceTypeEnum.SERVICETYPE_IPOWDM} @@ -59,7 +60,9 @@ def get_service_type( device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum ) -> ServiceTypeEnum: if device_type is DeviceTypeEnum.NCE: return ServiceTypeEnum.SERVICETYPE_L3NM - if device_type is DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: return ServiceTypeEnum.SERVICETYPE_L3NM + if device_type is DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: + if prv_service_type is not None: return prv_service_type + return ServiceTypeEnum.SERVICETYPE_L3NM if ( device_type in PACKET_DEVICE_TYPES and prv_service_type in SERVICE_TYPE_LXNM @@ -69,6 +72,10 @@ def get_service_type( prv_service_type in SERVICE_TYPE_IP_LINK ): return prv_service_type if device_type in L2_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_L2NM + if ( + device_type in OPTICAL_DEVICE_TYPES and + prv_service_type in SERVICE_TYPE_OPTICAL + ): return prv_service_type if device_type in OPTICAL_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE if device_type in NETWORK_DEVICE_TYPES: return prv_service_type if ( diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 272174d9924baad575493b9617a7acfd2035dc2f..8bdb06c0cbd2a8898bddbb4bc76d7b297f24992b 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -278,7 +278,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): ports = [] for endpoint_id in service.service_endpoint_ids: endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - if "." or "MGON" in endpoint_device_uuid: + if "." 
in endpoint_device_uuid or "MGON" in endpoint_device_uuid: endpoint_device_name = endpoint_device_uuid else: endpoint_device_name = device_names[endpoint_device_uuid] diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 9535e6ba5664886a84918c89526229977e6ded09..5b31b7de081f2860cf3d87fe059d7b0641fbfe82 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -186,7 +186,8 @@ SERVICE_HANDLERS = [ FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, FilterFieldEnum.DEVICE_DRIVER : [ DeviceDriverEnum.DEVICEDRIVER_OC, - DeviceDriverEnum.DEVICEDRIVER_OPENROADM + DeviceDriverEnum.DEVICEDRIVER_OPENROADM, + DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, ], } ]), diff --git a/src/service/service/tools/OpticalTools.py b/src/service/service/tools/OpticalTools.py index 99261647e0d8dbb26295b7c74a96ff9868ccebac..2f1f7acf03a683ddd4495f2c5a5a2dfa4a2ce58c 100644 --- a/src/service/service/tools/OpticalTools.py +++ b/src/service/service/tools/OpticalTools.py @@ -13,30 +13,31 @@ # limitations under the License. # -from common.method_wrappers.ServiceExceptions import NotFoundException -from service.service.service_handler_api.SettingsHandler import SettingsHandler import functools, json, logging, requests, uuid -from typing import List -from context.client.ContextClient import ContextClient -from common.Constants import ServiceNameEnum -from common.tools.context_queries.OpticalConfig import ( find_optical_band) +from typing import Dict, List, Tuple +from common.method_wrappers.ServiceExceptions import NotFoundException from common.proto.context_pb2 import( - Device, DeviceId, Service, Connection, EndPointId, TopologyId, ContextId, Uuid, - ConfigRule, ConfigActionEnum, ConfigRule_Custom, Empty,OpticalBandId,OpticalBand + ConfigActionEnum, ConfigRule, ConfigRule_Custom, Connection, ContextId, + Device, DeviceId, Empty, EndPointId, OpticalBand, OpticalBandId, OpticalBandList, + Service, TopologyId, Uuid ) from common.proto.pathcomp_pb2 import PathCompReply +from common.tools.context_queries.OpticalConfig import find_optical_band +from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_BASEURL_HTTP, ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_environment_variables, get_env_var_name ) +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from service.service.service_handler_api.SettingsHandler import SettingsHandler from service.service.tools.replies import ( - reply_uni_txt - , optical_band_uni_txt - , reply_bid_txt - , optical_band_bid_txt + reply_uni_txt, optical_band_uni_txt, reply_bid_txt, optical_band_bid_txt ) -log = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) TESTING = False @@ -56,68 +57,66 @@ def get_optical_controller_base_url() -> str: VAR_NAME_OPTICAL_CTRL_PORT, ]) base_url = settings.get(VAR_NAME_OPTICAL_CTRL_BASEURL_HTTP) - if base_url is not None: - log.debug('Optical Controller: base_url={:s}'.format(str(base_url))) - return base_url + if base_url is None: + schema = settings.get(VAR_NAME_OPTICAL_CTRL_SCHEMA, 'http') + host = settings.get(VAR_NAME_OPTICAL_CTRL_HOST) + port = int(settings.get(VAR_NAME_OPTICAL_CTRL_PORT, 80)) - host = settings.get(VAR_NAME_OPTICAL_CTRL_HOST) - 
port = int(settings.get(VAR_NAME_OPTICAL_CTRL_PORT, 80)) + if schema is None or host is None or port is None: + MSG = 'Missing settings for Optical Controller: settings={:s}' + raise Exception(MSG.format(str(settings))) - MSG = 'Optical Controller not found: settings={:s}' - if host is None: raise Exception(MSG.format(str(settings))) - if port is None: raise Exception(MSG.format(str(settings))) + base_url = OPTICAL_CTRL_BASE_URL.format(schema, host, port) - schema = settings.get(VAR_NAME_OPTICAL_CTRL_SCHEMA, 'http') - base_url = OPTICAL_CTRL_BASE_URL.format(schema, host, port) - log.debug('Optical Controller: base_url={:s}'.format(str(base_url))) + LOGGER.debug('Optical Controller: base_url={:s}'.format(str(base_url))) return base_url -def get_uuids_from_names(devices: List[Device], device_name: str, port_name: str): - device_uuid = "" - port_uuid = "" +def get_uuids_from_names( + devices : List[Device], device_name : str, port_name : str +) -> Tuple[str, str]: for device in devices: - if device.name == device_name: - device_uuid = device.device_id.device_uuid.uuid - for ep in device.device_endpoints: - if ep.name == port_name: - port_uuid = ep.endpoint_id.endpoint_uuid.uuid - return device_uuid, port_uuid - return "", "" - - -def get_names_from_uuids(devices: List[Device], device_uuid: str, port_uuid: str): - device_name = "" - port_name = "" + if device.name != device_name: continue + device_uuid = device.device_id.device_uuid.uuid + for ep in device.device_endpoints: + if ep.name != port_name: continue + port_uuid = ep.endpoint_id.endpoint_uuid.uuid + return device_uuid, port_uuid + return '', '' + + +def get_names_from_uuids( + devices : List[Device], device_uuid : str, port_uuid : str +) -> Tuple[str, str]: for device in devices: - if device.device_id.device_uuid.uuid == device_uuid: - device_name = device.name - for ep in device.device_endpoints: - if ep.endpoint_id.endpoint_uuid.uuid == port_uuid: - port_name = ep.name - return device_name, port_name - return "", "" - - -def get_device_name_from_uuid(devices: List[Device], device_uuid: str): - device_name = "" - + if device.device_id.device_uuid.uuid != device_uuid: continue + device_name = device.name + for ep in device.device_endpoints: + if ep.endpoint_id.endpoint_uuid.uuid != port_uuid: continue + port_name = ep.name + return device_name, port_name + return '', '' + + +def get_device_name_from_uuid( + devices : List[Device], device_uuid : str +) -> str: for device in devices: - if device.device_id.device_uuid.uuid == device_uuid: - device_name = device.name - return device_name - return "" + if device.device_id.device_uuid.uuid != device_uuid: continue + device_name = device.name + return device_name + return '' -def refresh_opticalcontroller(topology_id : dict): - topo_id_str = topology_id["topology_uuid"]["uuid"] - cxt_id_str = topology_id["context_id"]["context_uuid"]["uuid"] - headers = {"Content-Type": "application/json"} +def refresh_opticalcontroller(topology_id : Dict) -> None: + topo_id_str = topology_id['topology_uuid']['uuid'] + cxt_id_str = topology_id['context_id']['context_uuid']['uuid'] + headers = {'Content-Type': 'application/json'} base_url = get_optical_controller_base_url() - urlx = "{:s}/GetTopology/{:s}/{:s}".format(base_url, cxt_id_str, topo_id_str) + urlx = '{:s}/GetTopology/{:s}/{:s}'.format(base_url, cxt_id_str, topo_id_str) res = requests.get(urlx, headers=headers) if res is not None: - log.debug(f"GetTopology Response {res}") + LOGGER.debug(f"GetTopology Response {res}") def 
reconfig_flex_lightpath(flow_id) -> str: @@ -127,7 +126,7 @@ def reconfig_flex_lightpath(flow_id) -> str: base_url = get_optical_controller_base_url() urlx = "{:s}/ReconfigFlexLightpath/{}".format(base_url, flow_id) r = requests.put(urlx, headers=headers) - print(f"reconfig {r}") + LOGGER.debug(f"reconfig {r}") reply = r.text return reply else: @@ -158,7 +157,7 @@ def add_flex_lightpath(src, dst, bitrate, bidir, pref, ob_band, dj_optical_band_ else: urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(prefs), str(bidir), str(ob_band), str(dj_optical_band_id)) r = requests.put(urlx, headers=headers) - print(f"addpathlight {r}") + LOGGER.debug(f"addpathlight {r}") reply = r.text return reply else: @@ -189,7 +188,7 @@ def add_lightpath(src, dst, bitrate, bidir) -> str: bidir = 1 urlx = "{:s}/AddLightpath/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir)) r = requests.put(urlx, headers=headers) - print(f"addpathlight {r}") + LOGGER.debug(f"addpathlight {r}") reply = r.text return reply else: @@ -202,8 +201,8 @@ def get_optical_band(idx) -> str: if not TESTING: base_url = get_optical_controller_base_url() - urlx = "{:s}/GetOpticalBand/{:s}".format(base_url, str(idx)) - headers = {"Content-Type": "application/json"} + urlx = '{:s}/GetOpticalBand/{:s}'.format(base_url, str(idx)) + headers = {'Content-Type': 'application/json'} r = requests.get(urlx, headers=headers) reply = r.text return reply @@ -304,16 +303,16 @@ def adapt_reply_ob(devices, service, reply_json, context_id, topology_id, optica bidir_ob = ob["bidir"] # in case the service is built upon an existing optical band, don't calculate its endpoints for devxb in ob["flows"].keys(): - log.debug("optical-band device {}".format(devxb)) + LOGGER.debug("optical-band device {}".format(devxb)) in_end_point_b = "0" out_end_point_b = "0" in_end_point_f = ob["flows"][devxb]["f"]["in"] out_end_point_f = ob["flows"][devxb]["f"]["out"] - log.debug("optical-band ports {}, {}".format(in_end_point_f, out_end_point_f)) + LOGGER.debug("optical-band ports {}, {}".format(in_end_point_f, out_end_point_f)) if bidir_ob: in_end_point_b = ob["flows"][devxb]["b"]["in"] out_end_point_b = ob["flows"][devxb]["b"]["out"] - log.debug("optical-band ports {}, {}".format(in_end_point_b, out_end_point_b)) + LOGGER.debug("optical-band ports {}, {}".format(in_end_point_b, out_end_point_b)) #if (in_end_point_f == "0" or out_end_point_f == "0") and (in_end_point_b == "0" or out_end_point_b == "0"): if in_end_point_f != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_f) @@ -321,7 +320,7 @@ def adapt_reply_ob(devices, service, reply_json, context_id, topology_id, optica end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: - log.info("no map device port for device {} port {}".format(devxb, in_end_point_f)) + LOGGER.info("no map device port for device {} port {}".format(devxb, in_end_point_f)) if out_end_point_f != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_f) @@ -329,25 +328,25 @@ def adapt_reply_ob(devices, service, reply_json, context_id, topology_id, optica end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: -
log.info("no map device port for device {} port {}".format(devxb, out_end_point_f)) + LOGGER.info("no map device port for device {} port {}".format(devxb, out_end_point_f)) if in_end_point_b != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_b) if d_ob != "" and p_ob != "": end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: - log.info("no map device port for device {} port {}".format(devxb, in_end_point_b)) + LOGGER.info("no map device port for device {} port {}".format(devxb, in_end_point_b)) if out_end_point_b != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_b) if d_ob != "" and p_ob != "": end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: - log.info("no map device port for device {} port {}".format(devxb, out_end_point_b)) - log.debug("optical-band connection {}".format(connection_ob)) + LOGGER.info("no map device port for device {} port {}".format(devxb, out_end_point_b)) + LOGGER.debug("optical-band connection {}".format(connection_ob)) #check that list of endpoints is not empty if connection_ob is not None and len(connection_ob.path_hops_endpoint_ids) == 0: - log.debug("deleting empty optical-band connection") + LOGGER.debug("deleting empty optical-band connection") opt_reply.connections.remove(connection_ob) ''' @@ -382,16 +381,16 @@ def adapt_reply_ob(devices, service, reply_json, context_id, topology_id, optica -def adapt_reply(devices, service, reply_json, context_id, topology_id, optical_band_txt) -> PathCompReply: +def adapt_reply( + devices, service, reply_json, context_id : str, topology_id : str, optical_band_txt +) -> PathCompReply: opt_reply = PathCompReply() - topo = TopologyId( - context_id=ContextId(context_uuid=Uuid(uuid=context_id)), - topology_uuid=Uuid(uuid=topology_id) - ) + topo = TopologyId(**json_topology_id(topology_id, context_id=json_context_id(context_id))) + #add optical band connection first - rules_ob= [] + rules_ob = [] ob_id = 0 - connection_ob=None + connection_ob = None r = reply_json if "parent_opt_band" in r.keys(): @@ -436,16 +435,16 @@ def adapt_reply(devices, service, reply_json, context_id, topology_id, optical_b # in case the service is built upon existed optical band , don't clacluate the endpoints of it if new_ob != 2 : for devxb in ob["flows"].keys(): - log.debug("optical-band device {}".format(devxb)) + LOGGER.debug("optical-band device {}".format(devxb)) in_end_point_b = "0" out_end_point_b = "0" in_end_point_f = ob["flows"][devxb]["f"]["in"] out_end_point_f = ob["flows"][devxb]["f"]["out"] - log.debug("optical-band ports {}, {}".format(in_end_point_f, out_end_point_f)) + LOGGER.debug("optical-band ports {}, {}".format(in_end_point_f, out_end_point_f)) if bidir_ob: in_end_point_b = ob["flows"][devxb]["b"]["in"] out_end_point_b = ob["flows"][devxb]["b"]["out"] - log.debug("optical-band ports {}, {}".format(in_end_point_b, out_end_point_b)) + LOGGER.debug("optical-band ports {}, {}".format(in_end_point_b, out_end_point_b)) #if (in_end_point_f == "0" or out_end_point_f == "0") and (in_end_point_b == "0" or out_end_point_b == "0"): if in_end_point_f != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_f) @@ -453,7 +452,7 @@ def adapt_reply(devices, service, reply_json, context_id, topology_id, 
optical_b end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: - log.info("no map device port for device {} port {}".format(devxb, in_end_point_f)) + LOGGER.info("no map device port for device {} port {}".format(devxb, in_end_point_f)) if out_end_point_f != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_f) @@ -461,70 +460,70 @@ def adapt_reply(devices, service, reply_json, context_id, topology_id, optical_b end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: - log.info("no map device port for device {} port {}".format(devxb, out_end_point_f)) + LOGGER.info("no map device port for device {} port {}".format(devxb, out_end_point_f)) if in_end_point_b != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_b) if d_ob != "" and p_ob != "": end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: - log.info("no map device port for device {} port {}".format(devxb, in_end_point_b)) + LOGGER.info("no map device port for device {} port {}".format(devxb, in_end_point_b)) if out_end_point_b != "0": d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_b) if d_ob != "" and p_ob != "": end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) else: - log.info("no map device port for device {} port {}".format(devxb, out_end_point_b)) - log.debug("optical-band connection {}".format(connection_ob)) + LOGGER.info("no map device port for device {} port {}".format(devxb, out_end_point_b)) + LOGGER.debug("optical-band connection {}".format(connection_ob)) connection_f = add_connection_to_reply(opt_reply) connection_f.connection_id.connection_uuid.uuid = str(uuid.uuid4()) connection_f.service_id.CopyFrom(service.service_id) for devx in r["flows"].keys(): - log.debug("lightpath device {}".format(devx)) + LOGGER.debug("lightpath device {}".format(devx)) in_end_point_b = "0" out_end_point_b = "0" in_end_point_f = r["flows"][devx]["f"]["in"] out_end_point_f = r["flows"][devx]["f"]["out"] - log.debug("lightpath ports {}, {}".format(in_end_point_f, out_end_point_f)) + LOGGER.debug("lightpath ports {}, {}".format(in_end_point_f, out_end_point_f)) if bidir_f: in_end_point_b = r["flows"][devx]["b"]["in"] out_end_point_b = r["flows"][devx]["b"]["out"] - log.debug("lightpath ports {}, {}".format(in_end_point_b, out_end_point_b)) + LOGGER.debug("lightpath ports {}, {}".format(in_end_point_b, out_end_point_b)) if in_end_point_f != "0": d, p = get_uuids_from_names(devices, devx, in_end_point_f) if d != "" and p != "": end_point = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d)), endpoint_uuid=Uuid(uuid=p)) connection_f.path_hops_endpoint_ids.add().CopyFrom(end_point) else: - log.info("no map device port for device {} port {}".format(devx, in_end_point_f)) + LOGGER.info("no map device port for device {} port {}".format(devx, in_end_point_f)) if out_end_point_f != "0": d, p = get_uuids_from_names(devices, devx, out_end_point_f) if d != "" and p != "": end_point = EndPointId(topology_id=topo, 
device_id=DeviceId(device_uuid=Uuid(uuid=d)), endpoint_uuid=Uuid(uuid=p)) connection_f.path_hops_endpoint_ids.add().CopyFrom(end_point) else: - log.info("no map device port for device {} port {}".format(devx, out_end_point_f)) + LOGGER.info("no map device port for device {} port {}".format(devx, out_end_point_f)) if in_end_point_b != "0": d, p = get_uuids_from_names(devices, devx, in_end_point_b) if d != "" and p != "": end_point = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d)), endpoint_uuid=Uuid(uuid=p)) connection_f.path_hops_endpoint_ids.add().CopyFrom(end_point) else: - log.info("no map device port for device {} port {}".format(devx, in_end_point_b)) + LOGGER.info("no map device port for device {} port {}".format(devx, in_end_point_b)) if out_end_point_b != "0": d, p = get_uuids_from_names(devices, devx, out_end_point_b) if d != "" and p != "": end_point = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d)), endpoint_uuid=Uuid(uuid=p)) connection_f.path_hops_endpoint_ids.add().CopyFrom(end_point) else: - log.info("no map device port for device {} port {}".format(devx, out_end_point_b)) + LOGGER.info("no map device port for device {} port {}".format(devx, out_end_point_b)) #check that list of endpoints is not empty if connection_ob is not None and len(connection_ob.path_hops_endpoint_ids) == 0: - log.debug("deleting empty optical-band connection") + LOGGER.debug("deleting empty optical-band connection") opt_reply.connections.remove(connection_ob) #inizialize custom optical parameters @@ -558,73 +557,79 @@ def adapt_reply(devices, service, reply_json, context_id, topology_id, optical_b return opt_reply - def add_service_to_reply(reply : PathCompReply, service : Service) -> Service: service_x = reply.services.add() service_x.CopyFrom(service) return service_x -def add_connection_to_reply(reply : PathCompReply) -> Connection: - conn = reply.connections.add() - return conn +def add_connection_to_reply(reply : PathCompReply) -> Connection: + return reply.connections.add() - -def update_config_rules (service:Service,config_to_update:dict): +def update_config_rules( + service : Service, config_to_update : Dict +) -> Service: config_rules = service.service_config.config_rules if len(config_rules) == 0 : return service - for key,new_value in config_to_update.items(): - for c in config_rules: - if c.custom.resource_key == key : - c.custom.resource_value = json.dumps(new_value) - - - return service + for key, new_value in config_to_update.items(): + for c in config_rules: + if c.custom.resource_key != key: continue + c.custom.resource_value = json.dumps(new_value) + return service - - -def extend_optical_band (reply,optical_band_text)->Service : - logging.debug(f"optical-band extended {reply}") - logging.debug(f"optical-band_text {optical_band_text}") - optical_band_res= json.loads(optical_band_text) - if 'optical_band_id' not in optical_band_res: raise KeyError(f"opticalband id not found in the reply") - ob_index =optical_band_res['optical_band_id'] - band=optical_band_res['band'] - frequency=optical_band_res['freq'] - opticalband=find_optical_band(ob_index=ob_index) - if opticalband is None : - raise NotFoundException(f"Optical Band is not found ",extra_details=[ - f"The requested opticla band for index {ob_index} is not found" +def extend_optical_band(reply, optical_band_text) -> Service: + LOGGER.debug('[extend_optical_band] optical-band extended {:s}'.format(str(reply))) + LOGGER.debug('[extend_optical_band] optical-band_text 
{:s}'.format(str(optical_band_text))) + + optical_band_res = json.loads(optical_band_text) + if 'optical_band_id' not in optical_band_res: + MSG = 'optical_band_id not found in reply({:s})/optical_band_text({:s})' + raise KeyError(MSG.format(str(reply), str(optical_band_text))) + + context_client = ContextClient() + optical_bands : OpticalBandList = context_client.GetOpticalBand(Empty()) + LOGGER.warning('GetOpticalBand result: {:s}'.format(grpc_message_to_json_string(optical_bands))) + + ob_index = optical_band_res['optical_band_id'] + optical_band = find_optical_band(ob_index=ob_index) + if optical_band is None: + raise NotFoundException('OpticalBand', str(ob_index), extra_details=[ + 'optical_band_text={:s}'.format(str(optical_band_text)), + 'reply={:s}'.format(str(reply)) ]) - - service = opticalband.service - connection_uuid = opticalband.connection_id.connection_uuid.uuid - + + service = optical_band.service + connection_uuid = optical_band.connection_id.connection_uuid.uuid + setting_handler = SettingsHandler(service.service_config) - config_to_update = {} - setting_key = '/settings-ob_{}'.format(connection_uuid) - config = setting_handler.get(setting_key) - - - config.value['band']=band - config.value['frequency']=frequency - config.value['low-freq']= int(frequency - (band/2)) - config.value['up-freq']= int(frequency + (band/2)) - - logging.debug(f"before setting the config {service}") - config_to_update[setting_key]=config.value - setting_key = '/settings' - config = setting_handler.get(setting_key) - config.value['ob-expanded']=1 - config_to_update[setting_key]=config.value - logging.debug(f"config_to_update {config_to_update}") - service = update_config_rules(service,config_to_update) + setting_key_svc = '/settings' + setting_key_ob = '/settings-ob_{:s}'.format(connection_uuid) + + config_svc = setting_handler.get(setting_key_svc) + config_ob = setting_handler.get(setting_key_ob ) + + band = optical_band_res['band'] + frequency = optical_band_res['freq'] + config_ob.value['band' ] = band + config_ob.value['frequency'] = frequency + config_ob.value['low-freq' ] = int(frequency - (band/2)) + config_ob.value['up-freq' ] = int(frequency + (band/2)) + + config_svc.value['ob-expanded'] = 1 + + MSG = '[extend_optical_band] service before setting config {:s}' + LOGGER.debug(MSG.format(grpc_message_to_json_string(service))) + config_to_update = { + setting_key_svc : config_svc.value, + setting_key_ob : config_ob.value + } + + MSG = '[extend_optical_band] config_to_update={:s}' + LOGGER.debug(MSG.format(str(config_to_update))) + + service = update_config_rules(service, config_to_update) return service - - - - diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 144488cbd64c21719ea0845fa6fc2f064bed74db..01fb05eeaf0b5b4b0bd9945da223141df5e6ba4e 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -23,7 +23,7 @@ include: - local: '/src/tests/eucnc24/.gitlab-ci.yml' #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25/.gitlab-ci.yml' + - local: '/src/tests/ofc25/.gitlab-ci.yml' - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' - local: '/src/tests/acl_end2end/.gitlab-ci.yml' diff --git a/src/tests/oeccpsc22/nginx-ingress-http-dom1.yaml b/src/tests/oeccpsc22/nginx-ingress-http-dom1.yaml index c497d2e73d02ecaf99f783b27be386322fa3a45e..166d17297d5dad138eed2dbce38f83b59b47bbfb 100644 --- 
a/src/tests/oeccpsc22/nginx-ingress-http-dom1.yaml +++ b/src/tests/oeccpsc22/nginx-ingress-http-dom1.yaml @@ -15,43 +15,122 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: tfs-ingress-dom1 + name: tfs-ingress-dom1-web annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations spec: ingressClassName: tfs-ingress-class-dom1 rules: - http: paths: - path: /webui(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 8004 - path: /grafana(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 3000 - - path: /()(restconf/.*) +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom1-nbi + annotations: + # Enable websocket services and configure sticky cookies (seems not to work) + #nginx.org/websocket-services: "nbiservice" + #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io" + + # Enable sticky sessions (use same backend for all connections + # originated by a specific client, identified through its cookie) + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session" + nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io" + nginx.ingress.kubernetes.io/session-cookie-expires: "3600" + nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true" + + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + + # Enable long-lived connections, required for websocket/socket.io streams + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations +spec: + ingressClassName: tfs-ingress-class-dom1 + rules: + - http: + paths: + - path: /.well-known + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /restconf + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /socket.io + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /tfs-api + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /bmw + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /qkd_app + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /camara pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(tfs-api/.*) + - path: /agent-probes pathType: Prefix backend: 
service: name: nbiservice port: number: 8080 - - path: /()(bmw/.*) + - path: /osm-api pathType: Prefix backend: service: diff --git a/src/tests/oeccpsc22/nginx-ingress-http-dom2.yaml b/src/tests/oeccpsc22/nginx-ingress-http-dom2.yaml index 985a1be9e8699701819471cc8e5b175eb78afe66..7711c38ba63957112f83ae96813d446c28058414 100644 --- a/src/tests/oeccpsc22/nginx-ingress-http-dom2.yaml +++ b/src/tests/oeccpsc22/nginx-ingress-http-dom2.yaml @@ -15,43 +15,122 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: tfs-ingress-dom2 + name: tfs-ingress-dom2-web annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations spec: ingressClassName: tfs-ingress-class-dom2 rules: - http: paths: - path: /webui(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 8004 - path: /grafana(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 3000 - - path: /()(restconf/.*) +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom2-nbi + annotations: + # Enable websocket services and configure sticky cookies (seems not to work) + #nginx.org/websocket-services: "nbiservice" + #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io" + + # Enable sticky sessions (use same backend for all connections + # originated by a specific client, identified through its cookie) + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session" + nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io" + nginx.ingress.kubernetes.io/session-cookie-expires: "3600" + nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true" + + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + + # Enable long-lived connections, required for websocket/socket.io streams + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations +spec: + ingressClassName: tfs-ingress-class-dom2 + rules: + - http: + paths: + - path: /.well-known + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /restconf + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /socket.io + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /tfs-api + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /bmw + pathType: 
Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /qkd_app + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /camara pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(tfs-api/.*) + - path: /agent-probes pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(bmw/.*) + - path: /osm-api pathType: Prefix backend: service: diff --git a/src/tests/ofc23/tfs-ingress-child.yaml b/src/tests/ofc23/tfs-ingress-child.yaml index 79793d369f853ea918445680f2b43e7f0bb52bb8..fdfdd4cf78e3e7c3f8c8e8714d1264e4598eb767 100644 --- a/src/tests/ofc23/tfs-ingress-child.yaml +++ b/src/tests/ofc23/tfs-ingress-child.yaml @@ -15,43 +15,122 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: tfs-ingress-child + name: tfs-ingress-child-web annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations spec: ingressClassName: tfs-ingress-class-child rules: - http: paths: - path: /webui(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 8004 - path: /grafana(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 3000 - - path: /()(restconf/.*) +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-child-nbi + annotations: + # Enable websocket services and configure sticky cookies (seems not to work) + #nginx.org/websocket-services: "nbiservice" + #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io" + + # Enable sticky sessions (use same backend for all connections + # originated by a specific client, identified through its cookie) + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session" + nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io" + nginx.ingress.kubernetes.io/session-cookie-expires: "3600" + nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true" + + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + + # Enable long-lived connections, required for websocket/socket.io streams + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations +spec: + ingressClassName: tfs-ingress-class-child + rules: + - http: + paths: + - path: /.well-known + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /restconf + pathType: Prefix 
+ backend: + service: + name: nbiservice + port: + number: 8080 + - path: /socket.io + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /tfs-api + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /bmw + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /qkd_app + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /camara pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(tfs-api/.*) + - path: /agent-probes pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(bmw/.*) + - path: /osm-api pathType: Prefix backend: service: diff --git a/src/tests/ofc23/tfs-ingress-parent.yaml b/src/tests/ofc23/tfs-ingress-parent.yaml index ced1cd3a26ddaa46ca6ed34a5c76ef3d2d74b3df..a74df85994b68d92c885e3cef8fc6905dbb78d2a 100644 --- a/src/tests/ofc23/tfs-ingress-parent.yaml +++ b/src/tests/ofc23/tfs-ingress-parent.yaml @@ -15,43 +15,122 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: tfs-ingress-parent + name: tfs-ingress-parent-web annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations spec: ingressClassName: tfs-ingress-class-parent rules: - http: paths: - path: /webui(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 8004 - path: /grafana(/|$)(.*) - pathType: Prefix + pathType: ImplementationSpecific backend: service: name: webuiservice port: number: 3000 - - path: /()(restconf/.*) +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-parent-nbi + annotations: + # Enable websocket services and configure sticky cookies (seems not to work) + #nginx.org/websocket-services: "nbiservice" + #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io" + + # Enable sticky sessions (use same backend for all connections + # originated by a specific client, identified through its cookie) + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session" + nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io" + nginx.ingress.kubernetes.io/session-cookie-expires: "3600" + nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true" + + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server + + # Enable long-lived connections, required for websocket/socket.io streams + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" # max timeout between two successive read operations + 
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" # max timeout between two successive write operations +spec: + ingressClassName: tfs-ingress-class-parent + rules: + - http: + paths: + - path: /.well-known + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /restconf + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /socket.io + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /tfs-api + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /bmw + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /qkd_app + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /camara pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(tfs-api/.*) + - path: /agent-probes pathType: Prefix backend: service: name: nbiservice port: number: 8080 - - path: /()(bmw/.*) + - path: /osm-api pathType: Prefix backend: service: diff --git a/src/tests/ofc25/.gitlab-ci.yml b/src/tests/ofc25/.gitlab-ci.yml index 9e19abf1787d6dc9e77a6d69fc95c56254710f97..c7b5ef9afb5340503fef14baf6119d34f13e1057 100644 --- a/src/tests/ofc25/.gitlab-ci.yml +++ b/src/tests/ofc25/.gitlab-ci.yml @@ -46,199 +46,282 @@ end2end_test ofc25: #needs: # - build ofc25 before_script: - - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - - docker network rm -f na-br + # Do Docker cleanup + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker image prune --force + - docker network prune --force + - docker volume prune --all --force + - docker buildx prune --force + + # Check MicroK8s is ready + - microk8s status --wait-ready + - LOOP_MAX_ATTEMPTS=10 + - LOOP_COUNTER=0 + - > + while ! kubectl get pods --all-namespaces &> /dev/null; do + printf "%c" "." + sleep 1 + LOOP_COUNTER=$((LOOP_COUNTER + 1)) + if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + echo "Max attempts reached, exiting the loop." + exit 1 + fi + done + - kubectl get pods --all-namespaces + # Delete secondary ingress controllers + - kubectl delete -f src/tests/${TEST_NAME}/nginx-ingress-controller-opt.yaml --ignore-not-found + - kubectl delete -f src/tests/${TEST_NAME}/nginx-ingress-controller-ip.yaml --ignore-not-found + - kubectl delete -f src/tests/${TEST_NAME}/nginx-ingress-controller-e2e.yaml --ignore-not-found + + # Always delete Kubernetes namespaces + - export K8S_NAMESPACES=$(kubectl get namespace -o jsonpath='{.items[*].metadata.name}') + - echo "K8S_NAMESPACES=${K8S_NAMESPACES}" + + - export OLD_NATS_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^nats') + - echo "OLD_NATS_NAMESPACES=${OLD_NATS_NAMESPACES}" + - > + for ns in ${OLD_NATS_NAMESPACES}; do + if [[ "$ns" == nats* ]]; then + if helm3 status "$ns" &>/dev/null; then + helm3 uninstall "$ns" -n "$ns" + else + echo "Release '$ns' not found, skipping..." 
+ fi + fi + done + - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)') + - echo "OLD_NAMESPACES=${OLD_NAMESPACES}" + - kubectl delete namespace ${OLD_NAMESPACES} || true + + # Clean-up Kubernetes Failed pods + - > + kubectl get pods --all-namespaces --no-headers --field-selector=status.phase=Failed + -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name | + xargs --no-run-if-empty --max-args=2 kubectl delete pod --namespace + + # Login Docker repository + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - - helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || echo "Namespace not found" - - helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || echo "Namespace not found" - - helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || echo "Namespace not found" - - helm3 uninstall --namespace nats nats 2>/dev/null || echo "Namespace not found" - - kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found - - kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found - - echo "HOLA" script: # Download Docker image to run the test - - echo "Que tal" - docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest" - + - docker pull asgamb1/oc23bgp.img:latest + - docker pull asgamb1/flexscale-node.img:latest # Check MicroK8s is ready - microk8s status --wait-ready + - LOOP_MAX_ATTEMPTS=10 + - LOOP_COUNTER=0 + - > + while ! kubectl get pods --all-namespaces &> /dev/null; do + printf "%c" "." + sleep 1 + LOOP_COUNTER=$((LOOP_COUNTER + 1)) + if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + echo "Max attempts reached, exiting the loop." + exit 1 + fi + done - kubectl get pods --all-namespaces - - + # Deploy Optical Device Node Agents + - > + docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 + --ip-range=172.254.253.0/24 na-br + - > + docker run -dit --init --name na-t1 --network=na-br --ip 172.254.253.101 --publish 2022 + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t1.xml:/confd/examples.confd/OC23/platform.xml" + asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + - > + docker run -dit --init --name na-t2 --network=na-br --ip 172.254.253.102 --publish 2022 + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t2.xml:/confd/examples.confd/OC23/platform.xml" + asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + - > + docker run -dit --init --name na-r1 --network=na-br --ip 172.254.253.201 --publish 2022 + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r1.xml:/confd/examples.confd/OC23/platform.xml" + asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + - > + docker run -dit --init --name na-r2 --network=na-br --ip 172.254.253.202 --publish 2022 + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" + --volume 
"$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r2.xml:/confd/examples.confd/OC23/platform.xml" + asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + + + # Wait for initialization of Optical Device Node Agents + - sleep 3 + - docker ps -a + - while ! docker logs na-t1 2>&1 | grep -q '*** ConfD OpenConfig NETCONF agent ***'; do sleep 1; done + - while ! docker logs na-t2 2>&1 | grep -q '*** ConfD OpenConfig NETCONF agent ***'; do sleep 1; done + - while ! docker logs na-r1 2>&1 | grep -q '*** ConfD OpenConfig NETCONF agent ***'; do sleep 1; done + - while ! docker logs na-r2 2>&1 | grep -q '*** ConfD OpenConfig NETCONF agent ***'; do sleep 1; done + - sleep 3 + - docker ps -a + + # Configure TeraFlowSDN deployment # Uncomment if DEBUG log level is needed for the components - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/sliceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/e2eorchestratorservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/vntmservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/opticalcontrollerservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/e2e_orchestratorservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | 
select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/vnt_managerservice.yaml - - # Deploy Optical TeraFlowSDN + # ===== Deploy Optical TeraFlowSDN ================================================== - source src/tests/${TEST_NAME}/deploy_specs_opt.sh - - # Delete secondary ingress controllers - - kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml --ignore-not-found # Create secondary ingress controllers - - kubectl apply -f src/tests/ofc25/nginx-ingress-controller-opt.yaml - # Deploy TFS for OPT - - source src/tests/ofc25/deploy_specs_opt.sh + - kubectl apply -f src/tests/${TEST_NAME}/nginx-ingress-controller-opt.yaml # Change the name for the database - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak - | sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_opt_context"/}' manifests/contextservice.yaml - + - ./deploy/crdb.sh - ./deploy/nats.sh + - ./deploy/kafka.sh # - ./deploy/qdb.sh - - - - ./deploy/expose_dashboard.sh + # - ./deploy/expose_dashboard.sh - ./deploy/tfs.sh - ./deploy/show.sh - mv manifests/contextservice.yaml.bak manifests/contextservice.yaml - - mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_opt.sh - # - cp /var/teraflow/tfs_runtime_env_vars.sh /var/teraflow/tfs_runtime_env_vars_opt.sh + ## Wait for Context to be subscribed to NATS + #- while ! kubectl --namespace ${TFS_K8S_NAMESPACE} logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + #- kubectl --namespace ${TFS_K8S_NAMESPACE} logs deployment/contextservice -c server - - # Deploy IP TeraFlowSDN - # Delete secondary ingress controllers - - kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml --ignore-not-found + # ===== Deploy Packet TeraFlowSDN =================================================== + - source src/tests/${TEST_NAME}/deploy_specs_ip.sh # Create secondary ingress controllers - - kubectl apply -f src/tests/ofc25/nginx-ingress-controller-ip.yaml - - # Deploy TFS for IP - - source src/tests/ofc25/deploy_specs_ip.sh + - kubectl apply -f src/tests/${TEST_NAME}/nginx-ingress-controller-ip.yaml # Change the name for the database - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak - | sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_ip_context"/}' manifests/contextservice.yaml - - - echo "Sleeping 60" - - sleep 60 - - # - source src/tests/${TEST_NAME}/deploy_specs_ip.sh + - ./deploy/crdb.sh - ./deploy/nats.sh + - ./deploy/kafka.sh # - ./deploy/qdb.sh - - ./deploy/expose_dashboard.sh + # - ./deploy/expose_dashboard.sh - ./deploy/tfs.sh - ./deploy/show.sh - # - cp /var/teraflow/tfs_runtime_env_vars.sh /var/teraflow/tfs_runtime_env_vars_ip.sh + - mv manifests/contextservice.yaml.bak manifests/contextservice.yaml - mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_ip.sh - - ./src/tests/${TEST_NAME}/subscription_ws_ip.sh + ## Wait for Context to be subscribed to NATS + #- while ! kubectl --namespace ${TFS_K8S_NAMESPACE} logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? 
True'; do sleep 1; done + #- kubectl --namespace ${TFS_K8S_NAMESPACE} logs deployment/contextservice -c server - # Deploy E2E TeraFlowSDN + # ===== Deploy End-to-End TeraFlowSDN =============================================== - source src/tests/${TEST_NAME}/deploy_specs_e2e.sh - - - # Delete secondary ingress controllers - - kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml --ignore-not-found - # Create secondary ingress controllers - - kubectl apply -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml + - kubectl apply -f src/tests/${TEST_NAME}/nginx-ingress-controller-e2e.yaml # Change the name for the database - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak - | sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifests/contextservice.yaml - - # - sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifests/contextservice.yaml - + - ./deploy/crdb.sh - ./deploy/nats.sh + - ./deploy/kafka.sh # - ./deploy/qdb.sh - - ./deploy/expose_dashboard.sh + # - ./deploy/expose_dashboard.sh - ./deploy/tfs.sh - ./deploy/show.sh - # - ./src/tests/${TEST_NAME}/subscription_ws_e2e.sh - # - cp /var/teraflow/tfs_runtime_env_vars.sh /var/teraflow/tfs_runtime_env_vars_e2e.sh - mv manifests/contextservice.yaml.bak manifests/contextservice.yaml - - #Configure Subscription WS - - ./src/tests/ofc25/subscription_ws_e2e.sh - - mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_e2e.sh - - - echo "Por aqui" - - sleep 600 - - # Run end-to-end tests - #- if docker ps -a | grep ${TEST_NAME}; then docker rm -f ${TEST_NAME}; fi - #- > - # docker run -t --name ${TEST_NAME} --network=host - # --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" - # --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" - # $CI_REGISTRY_IMAGE/${TEST_NAME}:latest - - after_script: - # Dump TeraFlowSDN component logs - - echo "After" - - - source ./src/tests/${TEST_NAME}/deploy_specs_ip.sh - - kubectl --namespace tfs-ip logs deployment/contextservice -c server - - kubectl --namespace tfs-ip logs deployment/deviceservice -c server - - kubectl --namespace tfs-ip logs deployment/pathcompservice -c frontend - - kubectl --namespace tfs-ip logs deployment/serviceservice -c server - - kubectl --namespace tfs-ip logs deployment/nbiservice -c server - - kubectl --namespace tfs-ip logs deployment/vnt-managerservice -c server - - - source ./src/tests/${TEST_NAME}/deploy_specs_opt.sh - - kubectl --namespace tfs-opt logs deployment/contextservice -c server - - kubectl --namespace tfs-opt logs deployment/deviceservice -c server - - kubectl --namespace tfs-opt logs deployment/pathcompservice -c frontend - - kubectl --namespace tfs-opt logs deployment/serviceservice -c server - - kubectl --namespace tfs-opt logs deployment/nbiservice -c server - - - source ./src/tests/${TEST_NAME}/deploy_specs_e2e.sh - - kubectl --namespace tfs-e2e logs deployment/contextservice -c server - - kubectl --namespace tfs-e2e logs deployment/deviceservice -c server - - kubectl --namespace tfs-e2e logs deployment/pathcompservice -c frontend - - kubectl --namespace tfs-e2e logs deployment/serviceservice -c server - - kubectl --namespace tfs-e2e logs deployment/nbiservice -c server - - kubectl --namespace tfs-e2e logs deployment/e2e-orchestratorservice -c server + # Wait for Context to be subscribed to NATS + - while ! kubectl --namespace ${TFS_K8S_NAMESPACE} logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? 
True'; do sleep 1; done + - kubectl --namespace ${TFS_K8S_NAMESPACE} logs deployment/contextservice -c server + # ===== Run End-to-End tests ======================================================== - if docker ps -a | grep ${TEST_NAME}; then docker rm -f ${TEST_NAME}; fi + - export TFS_RUNNER_IP=$(ip -4 route get 1.1.1.1 | awk '{for (i=1; i<=NF; ++i) if ($i == "src") {print $(i+1); exit}}') + - if [ -z "${TFS_RUNNER_IP}" ]; then export TFS_RUNNER_IP=$(hostname -I | awk '{print $1}'); fi + - if [ -z "${TFS_RUNNER_IP}" ]; then echo "Unable to determine TFS_RUNNER_IP on GitLab runner host"; exit 1; fi + - echo "Using GitLab runner host IP ${TFS_RUNNER_IP} for OFC25 E2E descriptor materialization" + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --env TFS_RUNNER_IP="${TFS_RUNNER_IP}" + --volume "$PWD/tfs_runtime_env_vars_opt.sh:/var/teraflow/tfs_runtime_env_vars_opt.sh" + --volume "$PWD/tfs_runtime_env_vars_ip.sh:/var/teraflow/tfs_runtime_env_vars_ip.sh" + --volume "$PWD/tfs_runtime_env_vars_e2e.sh:/var/teraflow/tfs_runtime_env_vars_e2e.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest - # Dump container status and logs - - docker ps -a - - # Clean old docker images + after_script: + # Persist TeraFlowSDN and node-agent logs as artifacts instead of dumping them into the CI job log + - mkdir -p src/tests/${TEST_NAME}/logs + - kubectl logs --namespace tfs-e2e deployment/contextservice -c server > src/tests/${TEST_NAME}/logs/e2e-contextservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-e2e deployment/deviceservice -c server > src/tests/${TEST_NAME}/logs/e2e-deviceservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-e2e deployment/serviceservice -c server > src/tests/${TEST_NAME}/logs/e2e-serviceservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-e2e deployment/pathcompservice -c frontend > src/tests/${TEST_NAME}/logs/e2e-pathcompservice-frontend.log 2>&1 || true + - kubectl logs --namespace tfs-e2e deployment/pathcompservice -c backend > src/tests/${TEST_NAME}/logs/e2e-pathcompservice-backend.log 2>&1 || true + - kubectl logs --namespace tfs-e2e deployment/webuiservice -c server > src/tests/${TEST_NAME}/logs/e2e-webuiservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-e2e deployment/nbiservice -c server > src/tests/${TEST_NAME}/logs/e2e-nbiservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-e2e deployment/e2e-orchestratorservice -c server > src/tests/${TEST_NAME}/logs/e2e-e2e-orchestratorservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/contextservice -c server > src/tests/${TEST_NAME}/logs/ip-contextservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/deviceservice -c server > src/tests/${TEST_NAME}/logs/ip-deviceservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/serviceservice -c server > src/tests/${TEST_NAME}/logs/ip-serviceservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/pathcompservice -c frontend > src/tests/${TEST_NAME}/logs/ip-pathcompservice-frontend.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/pathcompservice -c backend > src/tests/${TEST_NAME}/logs/ip-pathcompservice-backend.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/webuiservice -c server > src/tests/${TEST_NAME}/logs/ip-webuiservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/nbiservice -c server > 
src/tests/${TEST_NAME}/logs/ip-nbiservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-ip deployment/vnt-managerservice -c server > src/tests/${TEST_NAME}/logs/ip-vnt-managerservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/contextservice -c server > src/tests/${TEST_NAME}/logs/opt-contextservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/deviceservice -c server > src/tests/${TEST_NAME}/logs/opt-deviceservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/serviceservice -c server > src/tests/${TEST_NAME}/logs/opt-serviceservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/pathcompservice -c frontend > src/tests/${TEST_NAME}/logs/opt-pathcompservice-frontend.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/pathcompservice -c backend > src/tests/${TEST_NAME}/logs/opt-pathcompservice-backend.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/webuiservice -c server > src/tests/${TEST_NAME}/logs/opt-webuiservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/nbiservice -c server > src/tests/${TEST_NAME}/logs/opt-nbiservice-server.log 2>&1 || true + - kubectl logs --namespace tfs-opt deployment/opticalcontrollerservice -c server > src/tests/${TEST_NAME}/logs/opt-opticalcontrollerservice-server.log 2>&1 || true + - docker logs na-t1 > src/tests/${TEST_NAME}/logs/na-na-t1.log 2>&1 || true + - docker logs na-t2 > src/tests/${TEST_NAME}/logs/na-na-t2.log 2>&1 || true + - docker logs na-r1 > src/tests/${TEST_NAME}/logs/na-na-r1.log 2>&1 || true + - docker logs na-r2 > src/tests/${TEST_NAME}/logs/na-na-r2.log 2>&1 || true + + # Clean up + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker network prune --force + - docker volume prune --all --force - docker image prune --force - - - helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || echo "Namespace not found" - - helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || echo "Namespace not found" - - helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || echo "Namespace not found" - - helm3 uninstall --namespace nats nats 2>/dev/null || echo "Namespace not found" - - kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found - - kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found - - echo "Adios" - - sleep 600 - + - kubectl delete namespaces tfs-ip tfs-opt tfs-e2e --ignore-not-found + - kubectl delete -f src/tests/${TEST_NAME}/nginx-ingress-controller-opt.yaml --ignore-not-found + - kubectl delete -f src/tests/${TEST_NAME}/nginx-ingress-controller-ip.yaml --ignore-not-found + - kubectl delete -f src/tests/${TEST_NAME}/nginx-ingress-controller-e2e.yaml --ignore-not-found + - helm3 uninstall --namespace nats-e2e nats-e2e || echo "Namespace not found" + - helm3 uninstall --namespace nats-ip nats-ip || echo "Namespace not found" + - helm3 uninstall --namespace nats-opt nats-opt || echo "Namespace not found" + - kubectl delete namespaces nats-ip nats-opt nats-e2e --ignore-not-found + - kubectl delete namespaces crdb qdb-e2e qdb-opt qdb-ip --ignore-not-found #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' rules: @@ -246,5 +329,7 @@ end2end_test ofc25: - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' artifacts: when: always + paths: + - 
./src/tests/${TEST_NAME}/logs/*.log reports: junit: ./src/tests/${TEST_NAME}/report_*.xml diff --git a/src/tests/ofc25/Dockerfile b/src/tests/ofc25/Dockerfile index b225e974ff7452ac54c55ae19957b8624b153d02..11de11bf7740116ad8b676729d319b9ae8654b48 100644 --- a/src/tests/ofc25/Dockerfile +++ b/src/tests/ofc25/Dockerfile @@ -22,10 +22,15 @@ RUN apt-get --yes --quiet --quiet update && \ # Set Python to show logs as they occur ENV PYTHONUNBUFFERED=0 +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + # Get generic Python packages RUN python3 -m pip install --upgrade 'pip==25.2' RUN python3 -m pip install --upgrade 'setuptools==79.0.0' 'wheel==0.45.1' -RUN python3 -m pip install --upgrade 'pip-tools==7.3.0's==7.3.0' +RUN python3 -m pip install --upgrade 'pip-tools==7.3.0' # Get common Python packages # Note: this step enables sharing the previous Docker build steps among all the Python components @@ -77,24 +82,64 @@ COPY src/vnt_manager/__init__.py vnt_manager/__init__.py COPY src/vnt_manager/client/. vnt_manager/client/ COPY src/tests/*.py ./tests/ COPY src/tests/ofc25/__init__.py ./tests/ofc25/__init__.py -COPY src/tests/ofc25/descriptors/descriptor_ip.json ./tests/ofc25/descriptors/descriptor_ip.json -COPY src/tests/ofc25/descriptors/descriptor_opt.json ./tests/ofc25/descriptors/descriptor_opt.json -COPY src/tests/ofc25/descriptors/descriptor_e2e.json ./tests/ofc25/descriptors/descriptor_e2e.json +COPY src/tests/ofc25/descriptors/*.json ./tests/ofc25/descriptors/ COPY src/tests/ofc25/tests/. ./tests/ofc25/tests/ RUN tee ./run_tests.sh < `ip` -> `opt`) using parameterized cleanup test + +Device enabled check: +- Bootstrap includes a device-operational-status check. +- It raises an exception if retries are exhausted. + +## Virtual-Link Validation +Creation test verifies: +- Initially: no services in `ip`, `e2e`, `opt`; no virtual links in `ip`. +- After each virtual link creation: + - `ip`: expected virtual links exist, no services. + - `e2e`: number of active optical services increases `1 -> 2 -> 3` (one per virtual link), each with one connection. + - `opt`: one active optical service with one connection while any virtual link exists. + +Deletion test verifies reverse behavior: +- Remove virtual links in reverse order (`03`, `02`, `01`). +- `e2e` services decrease `3 -> 2 -> 1 -> 0`. +- `opt` stays at one active service until last virtual link is removed, then `0`. +- Final state: no services anywhere and no virtual links in `ip`. 
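+
+A minimal sketch of these per-step checks, for orientation only (this is not code from the test suite): `count_services` and `count_virtual_links` are hypothetical stand-ins for the ContextClient/VNTManagerClient queries that the real tests perform.
+
+```python
+from typing import Callable
+
+def check_after_vlink_creation(
+    step: int,                               # 1-based index of the virtual link just created
+    count_services: Callable[[str], int],    # hypothetical: active services in 'ip', 'e2e' or 'opt'
+    count_virtual_links: Callable[[], int],  # hypothetical: virtual links present in 'ip'
+) -> None:
+    assert count_virtual_links() == step     # one virtual link per completed creation step
+    assert count_services('ip') == 0         # virtual links never materialize as 'ip' services
+    assert count_services('e2e') == step     # one 'e2e' optical service per virtual link (1 -> 2 -> 3)
+    assert count_services('opt') == 1        # a single 'opt' service backs all virtual links
+
+def check_after_vlink_deletion(
+    remaining: int,                          # virtual links still present after the deletion
+    count_services: Callable[[str], int],
+    count_virtual_links: Callable[[], int],
+) -> None:
+    assert count_virtual_links() == remaining
+    assert count_services('e2e') == remaining                 # 3 -> 2 -> 1 -> 0
+    assert count_services('opt') == (1 if remaining else 0)   # 'opt' drops to 0 only with the last link
+    assert count_services('ip') == 0
+```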
+ +## How To Run +From repo root: +```bash +cd /home/tfs/tfs-ctrl +``` + +Run full OFC25 suite: +```bash +./src/tests/ofc25/run_test.sh all +``` + +Run specific phases: +```bash +./src/tests/ofc25/run_test.sh init_opt +./src/tests/ofc25/run_test.sh init_ip +./src/tests/ofc25/run_test.sh init_e2e +./src/tests/ofc25/run_test.sh service +``` + +## Direct Pytest (Parameterized Bootstrap/Cleanup) +Examples: +```bash +PYTHONPATH=src python -m pytest -v src/tests/ofc25/tests/test_functional_bootstrap.py \ + --tfs-runtime-script=tfs_runtime_env_vars_opt.sh \ + --tfs-profile=opt \ + --tfs-topology-descriptor=topology_opt.json +``` + +```bash +PYTHONPATH=src python -m pytest -v src/tests/ofc25/tests/test_functional_cleanup.py \ + --tfs-runtime-script=tfs_runtime_env_vars_e2e.sh \ + --tfs-profile=e2e \ + --tfs-topology-descriptor=topology_e2e.json +``` + +## CI / Docker +The OFC25 Docker test image (`src/tests/ofc25/Dockerfile`) executes the same sequence via `/var/teraflow/run_tests.sh`, writing JUnit reports under `/opt/results`. + +In GitLab CI, `src/tests/ofc25/.gitlab-ci.yml` discovers the runner host IP before `docker run` and passes it as `TFS_RUNNER_IP` into the test container. This is required because detecting the address from inside the container would return the container/network namespace address instead of the host address that `e2e` must reach. diff --git a/src/tests/ofc25/_old/deploy_e2e.sh b/src/tests/ofc25/_old/deploy_e2e.sh deleted file mode 100755 index 8d2903463e4d3ccd77354140869b4b99fd3ac782..0000000000000000000000000000000000000000 --- a/src/tests/ofc25/_old/deploy_e2e.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Delete old namespaces -kubectl delete namespace tfs-e2e - -# Delete secondary ingress controllers -kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml - -# Create secondary ingress controllers -kubectl apply -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml - -# Deploy TFS for E2E -source src/tests/ofc25/deploy_specs_e2e.sh - -# Change the name for the database -cp manifests/contextservice.yaml manifests/contextservice.yaml.bak -sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifests/contextservice.yaml -./deploy/all.sh -mv manifests/contextservice.yaml.bak manifests/contextservice.yaml - -#Configure Subscription WS -./src/tests/ofc25/subscription_ws_e2e.sh - -mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_e2e.sh diff --git a/src/tests/ofc25/_old/deploy_ip.sh b/src/tests/ofc25/_old/deploy_ip.sh deleted file mode 100755 index fa7d4ff49af13b39784a82e0f7a8bfc29698e0d2..0000000000000000000000000000000000000000 --- a/src/tests/ofc25/_old/deploy_ip.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Delete old namespaces -kubectl delete namespace tfs-ip - -# Delete secondary ingress controllers -kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml - -# Create secondary ingress controllers -kubectl apply -f src/tests/ofc25/nginx-ingress-controller-ip.yaml - -# Deploy TFS for IP -source src/tests/ofc25/deploy_specs_ip.sh - -# Change the name for the database -cp manifests/contextservice.yaml manifests/contextservice.yaml.bak -sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_ip_context"/}' manifests/contextservice.yaml -./deploy/all.sh -mv manifests/contextservice.yaml.bak manifests/contextservice.yaml - -#Configure Subscription WS -./src/tests/ofc25/subscription_ws_ip.sh - -mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_ip.sh diff --git a/src/tests/ofc25/_old/deploy_opt.sh b/src/tests/ofc25/_old/deploy_opt.sh deleted file mode 100755 index b3556510925f94a205eb43bb91419d71fc86a0d8..0000000000000000000000000000000000000000 --- a/src/tests/ofc25/_old/deploy_opt.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Delete old namespaces -kubectl delete namespace tfs-opt - -# Delete secondary ingress controllers -kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml - -# Create secondary ingress controllers -kubectl apply -f src/tests/ofc25/nginx-ingress-controller-opt.yaml - -# Deploy TFS for OPT -source src/tests/ofc25/deploy_specs_opt.sh - -# Change the name for the database -cp manifests/contextservice.yaml manifests/contextservice.yaml.bak -sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_opt_context"/}' manifests/contextservice.yaml -./deploy/all.sh -mv manifests/contextservice.yaml.bak manifests/contextservice.yaml - -mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_opt.sh diff --git a/src/tests/ofc25/deploy.sh b/src/tests/ofc25/deploy_all_in_one.sh similarity index 71% rename from src/tests/ofc25/deploy.sh rename to src/tests/ofc25/deploy_all_in_one.sh index 0625da17290a1842053cd6d09a4b2cebc396a347..85d7f97e270c6dfc97e3d8d837cc7b36c63badb8 100755 --- a/src/tests/ofc25/deploy.sh +++ b/src/tests/ofc25/deploy_all_in_one.sh @@ -14,26 +14,26 @@ # limitations under the License. 
# ===== Check Microk8s is ready ==============================
-#microk8s status --wait-ready
-#kubectl get pods --all-namespaces
+microk8s status --wait-ready
+kubectl get pods --all-namespaces

# ===== Cleanup old deployments ==============================
-#helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || true
-#helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || true
-#helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || true
-#helm3 uninstall --namespace nats nats 2>/dev/null || true
-#kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e --ignore-not-found
-#kubectl delete namespaces qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found
-#kubectl delete namespaces kafka kafka-ip kafka-opt kafka-e2e --ignore-not-found
-#kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found
-#kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml --ignore-not-found
-#kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml --ignore-not-found
-#kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml --ignore-not-found
-#sleep 5
+helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || true
+helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || true
+helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || true
+helm3 uninstall --namespace nats nats 2>/dev/null || true
+kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e --ignore-not-found
+kubectl delete namespaces qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found
+kubectl delete namespaces kafka kafka-ip kafka-opt kafka-e2e --ignore-not-found
+kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml --ignore-not-found
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml --ignore-not-found
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml --ignore-not-found
+sleep 5

# ===== Check Microk8s is ready ==============================
-#microk8s status --wait-ready
-#kubectl get pods --all-namespaces
+microk8s status --wait-ready
+kubectl get pods --all-namespaces

# Configure TeraFlowSDN deployment
# Uncomment if DEBUG log level is needed for the components
@@ -52,12 +52,14 @@ kubectl apply -f src/tests/ofc25/nginx-ingress-controller-ip.yaml
kubectl apply -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml

cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
+cp src/webui/service/templates/main/home.html src/webui/service/templates/main/home.html.bak

# ===== Deploy Optical TeraFlowSDN ==============================
source src/tests/ofc25/deploy_specs_opt.sh
cp manifests/contextservice.yaml.bak manifests/contextservice.yaml
sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_opt_context"/}' manifests/contextservice.yaml
-sed -i 's|\(ETSI TeraFlowSDN Controller\)|\1 (Optical)|' src/webui/service/templates/main/home.html
+cp src/webui/service/templates/main/home.html.bak src/webui/service/templates/main/home.html
+sed -i 's|\(ETSI TeraFlowSDN Controller[^<]*\)|\1 (Optical)|' src/webui/service/templates/main/home.html

./deploy/crdb.sh
./deploy/nats.sh
@@ -74,7 +76,8 @@ mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_opt.sh
source src/tests/ofc25/deploy_specs_ip.sh
cp manifests/contextservice.yaml.bak manifests/contextservice.yaml
sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_ip_context"/}' manifests/contextservice.yaml
-sed -i 's|\(ETSI TeraFlowSDN Controller\)|\1 (Packet)|' src/webui/service/templates/main/home.html
+cp src/webui/service/templates/main/home.html.bak src/webui/service/templates/main/home.html
+sed -i 's|\(ETSI TeraFlowSDN Controller[^<]*\)|\1 (Packet)|' src/webui/service/templates/main/home.html

./deploy/crdb.sh
./deploy/nats.sh
@@ -91,7 +94,8 @@ mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_ip.sh
source src/tests/ofc25/deploy_specs_e2e.sh
cp manifests/contextservice.yaml.bak manifests/contextservice.yaml
sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifests/contextservice.yaml
-sed -i 's|\(ETSI TeraFlowSDN Controller\)|\1 (End-to-End)|' src/webui/service/templates/main/home.html
+cp src/webui/service/templates/main/home.html.bak src/webui/service/templates/main/home.html
+sed -i 's|\(ETSI TeraFlowSDN Controller[^<]*\)|\1 (End-to-End)|' src/webui/service/templates/main/home.html

./deploy/crdb.sh
./deploy/nats.sh
@@ -106,6 +110,7 @@ mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_e2e.sh

# ===== Recovering files =========================
mv manifests/contextservice.yaml.bak manifests/contextservice.yaml
+mv src/webui/service/templates/main/home.html.bak src/webui/service/templates/main/home.html

# ===== Wait Content for NATS Subscription =========================
diff --git a/src/tests/ofc25/deploy_specs_e2e.sh b/src/tests/ofc25/deploy_specs_e2e.sh
index a2664e1951a028e442a105984d30d390ff1b3a18..db04af0842431166d66cfbb0f1b0bcc8a5081d3f 100755
--- a/src/tests/ofc25/deploy_specs_e2e.sh
+++ b/src/tests/ofc25/deploy_specs_e2e.sh
@@ -211,3 +211,6 @@ export KFK_SERVER_PORT="9092"

# Set the flag to YES for redeploying of Apache Kafka
export KFK_REDEPLOY=""
+
+# Set the Kafka server address environment variable used by TFS components
+export KFK_SERVER_ADDRESS="kafka-service.${KFK_NAMESPACE}.svc.cluster.local:${KFK_SERVER_PORT}"
diff --git a/src/tests/ofc25/deploy_specs_ip.sh b/src/tests/ofc25/deploy_specs_ip.sh
index d48fe662b6aeaa64ff76fe79148534d40f8c6566..5d5e21b6348764e9f08c02bfd3d8a1e8e912d568 100755
--- a/src/tests/ofc25/deploy_specs_ip.sh
+++ b/src/tests/ofc25/deploy_specs_ip.sh
@@ -211,3 +211,6 @@ export KFK_SERVER_PORT="9092"

# Set the flag to YES for redeploying of Apache Kafka
export KFK_REDEPLOY=""
+
+# Set the Kafka server address environment variable used by TFS components
+export KFK_SERVER_ADDRESS="kafka-service.${KFK_NAMESPACE}.svc.cluster.local:${KFK_SERVER_PORT}"
diff --git a/src/tests/ofc25/deploy_specs_opt.sh b/src/tests/ofc25/deploy_specs_opt.sh
index 0a898e1a48f1628d9d3b3acd374533fff80bc062..0b45f934f80b6839bda74bc3cf5e0dd6492f9106 100755
--- a/src/tests/ofc25/deploy_specs_opt.sh
+++ b/src/tests/ofc25/deploy_specs_opt.sh
@@ -211,3 +211,6 @@ export KFK_SERVER_PORT="9092"

# Set the flag to YES for redeploying of Apache Kafka
export KFK_REDEPLOY=""
+
+# Set the Kafka server address environment variable used by TFS components
+export KFK_SERVER_ADDRESS="kafka-service.${KFK_NAMESPACE}.svc.cluster.local:${KFK_SERVER_PORT}"
diff --git a/src/tests/ofc25/descriptors/topology_e2e-netorch.json b/src/tests/ofc25/descriptors/topology_e2e-netorch.json
deleted file mode 100644
index 6d5d119bb42e09312548b8fda067503b757e867c..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/descriptors/topology_e2e-netorch.json
+++ /dev/null
@@ -1,85 +0,0 @@
-{
-    "contexts": [
-        {"context_id": {"context_uuid": {"uuid": "admin"}}}
-    ],
-    "topologies": [
-        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
-    ],
-    "devices": [
-        {
-            "device_id": {"device_uuid": {"uuid": "TFS-PACKET"}}, "device_type": "teraflowsdn",
-            "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED",
-            "device_config": {"config_rules": [
-                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.1.1.96"}},
-                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}},
-                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
-                    "scheme": "http", "username": "admin", "password": "admin", "import_topology": "topology"
-                }}}
-            ]}
-        },
-        {
-            "device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}}, "device_type": "teraflowsdn",
-            "device_drivers": ["DEVICEDRIVER_OPTICAL_TFS"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED",
-            "device_config":
{"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.1.1.96"}}, - {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8003"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { - "scheme": "http", "username": "admin", "password": "admin", "import_topology": "topology" - }}} - ]} - } - ], - "links": [ - {"link_id": {"link_uuid": {"uuid": "IP1-T1.1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe1"}}, - {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP1-T1.2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}}, - {"device_id": {"device_uuid": {"uuid": "T1.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP1-T1.3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}}, - {"device_id": {"device_uuid": {"uuid": "T1.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }} - ]}, - - {"link_id": {"link_uuid": {"uuid": "IP2-T2.1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe1"}}, - {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP2-T2.2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}}, - {"device_id": {"device_uuid": {"uuid": "T2.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP2-T2.3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}}, - {"device_id": {"device_uuid": {"uuid": "T2.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }} - ]}, - - {"link_id": {"link_uuid": {"uuid": "T1.1-IP1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, - {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe1"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "T1.2-IP1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "T1.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, - {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "T1.3-IP1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "T1.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, - {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "T2.1-IP2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, - {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe1"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "T2.2-IP2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "T2.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, - {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "T2.3-IP2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "T2.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, - {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}} - ]} - ] -} diff --git 
a/src/tests/ofc25/descriptors/topology_e2e-local-vm.json b/src/tests/ofc25/descriptors/topology_e2e.json similarity index 100% rename from src/tests/ofc25/descriptors/topology_e2e-local-vm.json rename to src/tests/ofc25/descriptors/topology_e2e.json diff --git a/src/tests/ofc25/descriptors/create-vlink-01.json b/src/tests/ofc25/descriptors/virtual_link_01.json similarity index 100% rename from src/tests/ofc25/descriptors/create-vlink-01.json rename to src/tests/ofc25/descriptors/virtual_link_01.json diff --git a/src/tests/ofc25/descriptors/create-vlink-02.json b/src/tests/ofc25/descriptors/virtual_link_02.json similarity index 100% rename from src/tests/ofc25/descriptors/create-vlink-02.json rename to src/tests/ofc25/descriptors/virtual_link_02.json diff --git a/src/tests/ofc25/descriptors/create-vlink-03.json b/src/tests/ofc25/descriptors/virtual_link_03.json similarity index 100% rename from src/tests/ofc25/descriptors/create-vlink-03.json rename to src/tests/ofc25/descriptors/virtual_link_03.json diff --git a/src/tests/ofc25/dump-logs.sh b/src/tests/ofc25/dump-logs.sh index d389594e4014c29d1402a0ffa33a03694265377b..44c615a116f1167c52568b00ecc27b965636f5e3 100755 --- a/src/tests/ofc25/dump-logs.sh +++ b/src/tests/ofc25/dump-logs.sh @@ -13,41 +13,47 @@ # See the License for the specific language governing permissions and # limitations under the License. +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +TMP_LOG_PATH="$SCRIPT_DIR/tmp/exec" -rm logs -rf tmp/exec -mkdir -p tmp/exec +echo "Cleaning old logs from $TMP_LOG_PATH" +rm -rf $TMP_LOG_PATH + +echo "Dumping logs to $TMP_LOG_PATH" +mkdir -p $TMP_LOG_PATH +cd $TMP_LOG_PATH echo "Collecting logs for E2E..." -kubectl logs --namespace tfs-e2e deployment/contextservice -c server > tmp/exec/e2e-context.log -kubectl logs --namespace tfs-e2e deployment/deviceservice -c server > tmp/exec/e2e-device.log -kubectl logs --namespace tfs-e2e deployment/serviceservice -c server > tmp/exec/e2e-service.log -kubectl logs --namespace tfs-e2e deployment/pathcompservice -c frontend > tmp/exec/e2e-pathcomp-frontend.log -kubectl logs --namespace tfs-e2e deployment/pathcompservice -c backend > tmp/exec/e2e-pathcomp-backend.log -kubectl logs --namespace tfs-e2e deployment/webuiservice -c server > tmp/exec/e2e-webui.log -kubectl logs --namespace tfs-e2e deployment/nbiservice -c server > tmp/exec/e2e-nbi.log -kubectl logs --namespace tfs-e2e deployment/e2e-orchestratorservice -c server > tmp/exec/e2e-orch.log +kubectl logs --namespace tfs-e2e deployment/contextservice -c server > e2e-context.log +kubectl logs --namespace tfs-e2e deployment/deviceservice -c server > e2e-device.log +kubectl logs --namespace tfs-e2e deployment/serviceservice -c server > e2e-service.log +kubectl logs --namespace tfs-e2e deployment/pathcompservice -c frontend > e2e-pathcomp-frontend.log +kubectl logs --namespace tfs-e2e deployment/pathcompservice -c backend > e2e-pathcomp-backend.log +kubectl logs --namespace tfs-e2e deployment/webuiservice -c server > e2e-webui.log +kubectl logs --namespace tfs-e2e deployment/nbiservice -c server > e2e-nbi.log +kubectl logs --namespace tfs-e2e deployment/e2e-orchestratorservice -c server > e2e-orch.log printf "\n" echo "Collecting logs for IP..." 
-kubectl logs --namespace tfs-ip deployment/contextservice -c server > tmp/exec/ip-context.log -kubectl logs --namespace tfs-ip deployment/deviceservice -c server > tmp/exec/ip-device.log -kubectl logs --namespace tfs-ip deployment/serviceservice -c server > tmp/exec/ip-service.log -kubectl logs --namespace tfs-ip deployment/pathcompservice -c frontend > tmp/exec/ip-pathcomp-frontend.log -kubectl logs --namespace tfs-ip deployment/pathcompservice -c backend > tmp/exec/ip-pathcomp-backend.log -kubectl logs --namespace tfs-ip deployment/webuiservice -c server > tmp/exec/ip-webui.log -kubectl logs --namespace tfs-ip deployment/nbiservice -c server > tmp/exec/ip-nbi.log -kubectl logs --namespace tfs-ip deployment/vnt-managerservice -c server > tmp/exec/ip-vntm.log +kubectl logs --namespace tfs-ip deployment/contextservice -c server > ip-context.log +kubectl logs --namespace tfs-ip deployment/deviceservice -c server > ip-device.log +kubectl logs --namespace tfs-ip deployment/serviceservice -c server > ip-service.log +kubectl logs --namespace tfs-ip deployment/pathcompservice -c frontend > ip-pathcomp-frontend.log +kubectl logs --namespace tfs-ip deployment/pathcompservice -c backend > ip-pathcomp-backend.log +kubectl logs --namespace tfs-ip deployment/webuiservice -c server > ip-webui.log +kubectl logs --namespace tfs-ip deployment/nbiservice -c server > ip-nbi.log +kubectl logs --namespace tfs-ip deployment/vnt-managerservice -c server > ip-vntm.log printf "\n" echo "Collecting logs for OPT..." -kubectl logs --namespace tfs-opt deployment/contextservice -c server > tmp/exec/opt-context.log -kubectl logs --namespace tfs-opt deployment/deviceservice -c server > tmp/exec/opt-device.log -kubectl logs --namespace tfs-opt deployment/serviceservice -c server > tmp/exec/opt-service.log -kubectl logs --namespace tfs-opt deployment/pathcompservice -c frontend > tmp/exec/opt-pathcomp-frontend.log -kubectl logs --namespace tfs-opt deployment/pathcompservice -c backend > tmp/exec/opt-pathcomp-backend.log -kubectl logs --namespace tfs-opt deployment/webuiservice -c server > tmp/exec/opt-webui.log -kubectl logs --namespace tfs-opt deployment/nbiservice -c server > tmp/exec/opt-nbi.log -kubectl logs --namespace tfs-opt deployment/opticalcontrollerservice -c server > tmp/exec/opt-ctrl.log +kubectl logs --namespace tfs-opt deployment/contextservice -c server > opt-context.log +kubectl logs --namespace tfs-opt deployment/deviceservice -c server > opt-device.log +kubectl logs --namespace tfs-opt deployment/serviceservice -c server > opt-service.log +kubectl logs --namespace tfs-opt deployment/pathcompservice -c frontend > opt-pathcomp-frontend.log +kubectl logs --namespace tfs-opt deployment/pathcompservice -c backend > opt-pathcomp-backend.log +kubectl logs --namespace tfs-opt deployment/webuiservice -c server > opt-webui.log +kubectl logs --namespace tfs-opt deployment/nbiservice -c server > opt-nbi.log +kubectl logs --namespace tfs-opt deployment/opticalcontrollerservice -c server > opt-ctrl.log printf "\n" echo "Done!" 
diff --git a/src/tests/ofc25/node-agents-config/platform_r1.xml b/src/tests/ofc25/node-agents-config/platform_r1.xml new file mode 100644 index 0000000000000000000000000000000000000000..23b6b839b5823e074f5487102db6bf2fed904a36 --- /dev/null +++ b/src/tests/ofc25/node-agents-config/platform_r1.xml @@ -0,0 +1,253 @@ + + + + + + 2 + + 2 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D1 + + + + + + 12 + + 12 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D1 + + + + + + 3 + + 3 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 13 + + 13 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 4 + + 4 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 14 + + 14 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + + 101 + + 101 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 111 + + 111 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + \ No newline at end of file diff --git a/src/tests/ofc25/node-agents-config/platform_r2.xml b/src/tests/ofc25/node-agents-config/platform_r2.xml new file mode 100644 index 0000000000000000000000000000000000000000..6a50928622eadcc16c4c96aeeb43bc4697ba5d94 --- /dev/null +++ b/src/tests/ofc25/node-agents-config/platform_r2.xml @@ -0,0 +1,253 @@ + + + + + + 2 + + 2 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D1 + + + + + + 12 + + 12 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D1 + + + + + + 3 + + 3 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 13 + + 13 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 4 + + 4 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 14 + + 14 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + 
MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + + 101 + + 101 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + OUTPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + 111 + + 111 + + + + MG_ON_PORT_TYPE + + MG_ON_PORT_TYPE + MG_ON_OPTICAL_PORT_WAVEBAND + + + + MG_ON_PORT_DIRECTION + + MG_ON_PORT_DIRECTION + INPUT + + + + MG_ON_PORT_DEGREE + + MG_ON_PORT_DEGREE + D2 + + + + + + \ No newline at end of file diff --git a/src/tests/ofc25/node-agents-config/platform_t1.xml b/src/tests/ofc25/node-agents-config/platform_t1.xml new file mode 100644 index 0000000000000000000000000000000000000000..44d58adff0ab5411c7b0cc035570c76b4972a881 --- /dev/null +++ b/src/tests/ofc25/node-agents-config/platform_t1.xml @@ -0,0 +1,311 @@ + + + + + + device + + device + + + MellanoxSwitch + SSSA-CNIT + 1.0.0 + 1.0.0 + 1.0.0 + 610610 + typex:OPERATING_SYSTEM + + + + channel-1 + + channel-1 + + + channel-1 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-1 + + + 191600000 + 0 + 0 + transceiver-1 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + transceiver-1 + + transceiver-1 + + + transceiver-1 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-1 + + + + + + + + port-1 + + port-1 + + + port-1 + typex:PORT + + + + channel-1 + + channel-1 + + + channel-1 + + + + + + onos-index + + onos-index + 4 + + + onos-index + 4 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + + + + + + + 1 + + 1 + Logical channel 1 + DISABLED + type:PROT_OTN + NONE + + + 1 + Logical channel 1 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-1 + + + transceiver-1 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-1 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-1 + + + + + + + + 1 + + 1 + FEC1 + Ericsson + + + + 2 + + 2 + FEC2 + Ericsson + + + + + diff --git a/src/tests/ofc25/node-agents-config/platform_t2.xml b/src/tests/ofc25/node-agents-config/platform_t2.xml new file mode 100644 index 0000000000000000000000000000000000000000..933c2faffb3263c3cfae76166171e2c7c7d0213d --- /dev/null +++ b/src/tests/ofc25/node-agents-config/platform_t2.xml @@ -0,0 +1,311 @@ + + + + + + device + + device + + + MellanoxSwitch + SSSA-CNIT + 1.0.0 + 1.0.0 + 1.0.0 + 610610 + typex:OPERATING_SYSTEM + + + + channel-6 + + channel-6 + + + channel-6 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-6 + + + 191600000 + 0 + 0 + transceiver-6 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + transceiver-6 + + transceiver-6 + + + transceiver-6 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + 
typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-6 + + + + + + + + port-6 + + port-6 + + + port-6 + typex:PORT + + + + channel-6 + + channel-6 + + + channel-6 + + + + + + onos-index + + onos-index + 4 + + + onos-index + 4 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + + + + + + + 4 + + 4 + Logical channel 4 + DISABLED + type:PROT_OTN + NONE + + + 4 + Logical channel 4 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-6 + + + transceiver-6 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-6 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-6 + + + + + + + + 1 + + 1 + FEC1 + Ericsson + + + + 2 + + 2 + FEC2 + Ericsson + + + + + diff --git a/src/tests/ofc25/node-agents-config/startNetconfAgent-mg-on.sh b/src/tests/ofc25/node-agents-config/startNetconfAgent-mg-on.sh new file mode 100755 index 0000000000000000000000000000000000000000..4d9247daf6677a78fa864e7c95c7ce1e4f0d66f1 --- /dev/null +++ b/src/tests/ofc25/node-agents-config/startNetconfAgent-mg-on.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo 'Cleaning...' +make clean + +echo 'Rebuilding...' +make all + +echo 'Initializing database...' +cp platform.xml confd-cdb/ + +echo 'Starting ConfD...' +make start2 + +echo 'ConfD Ready!!' diff --git a/src/tests/ofc25/node-agents-config/startNetconfAgent-tp.sh b/src/tests/ofc25/node-agents-config/startNetconfAgent-tp.sh new file mode 100755 index 0000000000000000000000000000000000000000..47d19b07137bc7deedf9bea6851509a1d08826ea --- /dev/null +++ b/src/tests/ofc25/node-agents-config/startNetconfAgent-tp.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo 'Cleaning...' +make clean + +echo 'Rebuilding...' +make all + +echo 'Initializing database...' +cp platform.xml confd-cdb/ +cp interfaces.xml confd-cdb/ +cp bgp.xml confd-cdb/ + +echo 'Starting ConfD...' +make start2 + +echo 'ConfD Ready!!' 
diff --git a/src/tests/ofc25/node-agents-config/transponders_x4.xml b/src/tests/ofc25/node-agents-config/transponders_x4.xml new file mode 100644 index 0000000000000000000000000000000000000000..f5cbc2cd39f131176c633507e8fcc2195187c38e --- /dev/null +++ b/src/tests/ofc25/node-agents-config/transponders_x4.xml @@ -0,0 +1,1055 @@ + + + + + + device + + device + + + MellanoxSwitch + SSSA-CNIT + 1.0.0 + 1.0.0 + 1.0.0 + 610610 + typex:OPERATING_SYSTEM + + + + channel-1 + + channel-1 + + + channel-1 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-1 + + + 191600000 + 0 + 0 + transceiver-1 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + channel-2 + + channel-2 + + + channel-2 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-2 + + + 191600000 + 0 + 0 + transceiver-2 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + channel-3 + + channel-3 + + + channel-3 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-3 + + + 191600000 + 0 + 0 + transceiver-3 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + channel-4 + + channel-4 + + + channel-4 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-4 + + + 191600000 + 0 + 0 + transceiver-4 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + transceiver-1 + + transceiver-1 + + + transceiver-1 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-1 + + + + + + + transceiver-2 + + transceiver-2 + + + transceiver-2 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-2 + + + + + + + transceiver-3 + + transceiver-3 + + + transceiver-3 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-3 + + + + + + + + transceiver-4 + + transceiver-4 + + + transceiver-4 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-4 + + + + + + + port-1 + + port-1 + + + port-1 + typex:PORT + + + + channel-1 + + channel-1 + + + channel-1 + + + + + + onos-index + + onos-index + 1 + + + onos-index + 1 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + port-2 + + port-2 + + + port-2 + typex:PORT + + + + channel-2 
+ + channel-2 + + + channel-2 + + + + + + onos-index + + onos-index + 2 + + + onos-index + 2 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + port-3 + + port-3 + + + port-3 + typex:PORT + + + + channel-3 + + channel-3 + + + channel-3 + + + + + + onos-index + + onos-index + 3 + + + onos-index + 3 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + port-4 + + port-4 + + + port-4 + typex:PORT + + + + channel-4 + + channel-4 + + + channel-4 + + + + + + onos-index + + onos-index + 4 + + + onos-index + 4 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + + + + + + 1 + + 1 + Logical channel 1 + DISABLED + type:PROT_OTN + NONE + + + 1 + Logical channel 1 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-1 + + + transceiver-1 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-1 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-1 + + + + + + + + 2 + + 2 + Logical channel 2 + DISABLED + type:PROT_OTN + NONE + + + 2 + Logical channel 2 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-2 + + + transceiver-2 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-2 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-2 + + + + + + + + 3 + + 3 + Logical channel 3 + DISABLED + type:PROT_OTN + NONE + + + 3 + Logical channel 3 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-3 + + + transceiver-3 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-3 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-3 + + + + + + + + 4 + + 4 + Logical channel 4 + DISABLED + type:PROT_OTN + NONE + + + 4 + Logical channel 4 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-4 + + + transceiver-4 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-4 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-4 + + + + + + + + 1 + + 1 + FEC1 + Ericsson + + + + 2 + + 2 + FEC2 + Ericsson + + + + + + diff --git a/src/tests/ofc25/node-agents-config/transponders_x4_2.xml b/src/tests/ofc25/node-agents-config/transponders_x4_2.xml new file mode 100644 index 0000000000000000000000000000000000000000..c70e120ca27f57de6113d3f00ccf45cfd3fe38c6 --- /dev/null +++ b/src/tests/ofc25/node-agents-config/transponders_x4_2.xml @@ -0,0 +1,1055 @@ + + + + + + device + + device + + + MellanoxSwitch + SSSA-CNIT + 1.0.0 + 1.0.0 + 1.0.0 + 610610 + typex:OPERATING_SYSTEM + + + + channel-5 + + channel-5 + + + channel-5 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-5 + + + 
191600000 + 0 + 0 + transceiver-5 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + channel-6 + + channel-6 + + + channel-6 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-6 + + + 191600000 + 0 + 0 + transceiver-6 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + channel-7 + + channel-7 + + + channel-7 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-7 + + + 191600000 + 0 + 0 + transceiver-7 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + channel-8 + + channel-8 + + + channel-8 + typex:OPTICAL_CHANNEL + + + + 191600000 + 100 + 0 + transceiver-8 + + + 191600000 + 0 + 0 + transceiver-8 + 1 + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + + + + + + transceiver-5 + + transceiver-5 + + + transceiver-5 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-5 + + + + + + + transceiver-6 + + transceiver-6 + + + transceiver-6 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-6 + + + + + + + transceiver-7 + + transceiver-7 + + + transceiver-7 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-7 + + + + + + + + transceiver-8 + + transceiver-8 + + + transceiver-8 + typex:TRANSCEIVER + + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + + + true + typex:QSFP56_DD_TYPE1 + typex:ETH_400GBASE_ZR + typex:FEC_AUTO + typex:TYPE_DIGITAL_COHERENT_OPTIC + Cisco + 400zr-QSFP-DD + 01 + 1567321 + + + + 1 + + 1 + channel-8 + + + + + + + port-5 + + port-5 + + + port-5 + typex:PORT + + + + channel-5 + + channel-5 + + + channel-5 + + + + + + onos-index + + onos-index + 5 + + + onos-index + 5 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + port-6 + + port-6 + + + port-6 + typex:PORT + + + + channel-6 + + channel-6 + + + channel-6 + + + + + + onos-index + + onos-index + 6 + + + onos-index + 6 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + port-7 + + port-7 + + + port-7 + typex:PORT + + + + channel-7 + + channel-7 + + + channel-7 + + + + + + onos-index + + onos-index + 7 + + + onos-index + 7 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + port-8 + + port-8 + + + port-8 + typex:PORT + + + + channel-8 + + channel-8 + + + channel-8 + + + + + + onos-index + + onos-index + 8 + + + 
onos-index + 8 + + + + odtn-port-type + + odtn-port-type + line + + + odtn-port-type + line + + + + + + + + + + + 5 + + 5 + Logical channel 5 + DISABLED + type:PROT_OTN + NONE + + + 5 + Logical channel 5 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-5 + + + transceiver-5 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-5 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-5 + + + + + + + + 6 + + 6 + Logical channel 6 + DISABLED + type:PROT_OTN + NONE + + + 6 + Logical channel 6 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-6 + + + transceiver-6 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-6 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-6 + + + + + + + + 7 + + 7 + Logical channel 7 + DISABLED + type:PROT_OTN + NONE + + + 7 + Logical channel 7 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-7 + + + transceiver-7 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-7 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-7 + + + + + + + + 8 + + 8 + Logical channel 8 + DISABLED + type:PROT_OTN + NONE + + + 8 + Logical channel 8 + DISABLED + type:PROT_OTN + NONE + UP + + + + transceiver-8 + + + transceiver-8 + + + + + test1 + test1 + + + test1 + test1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0.0 + 0 + + + + + + 1 + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-8 + + + 1 + Optical channel assigned 100 + 100 + OPTICAL_CHANNEL + channel-8 + + + + + + + + 1 + + 1 + FEC1 + Ericsson + + + + 2 + + 2 + FEC2 + Ericsson + + + + + + diff --git a/src/tests/ofc25/requirements.in b/src/tests/ofc25/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..5c92783a232a5bbe18b4dd6d0e6735e3ce8414c2 --- /dev/null +++ b/src/tests/ofc25/requirements.in @@ -0,0 +1,15 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +requests==2.27.* diff --git a/src/tests/ofc25/run_test.sh b/src/tests/ofc25/run_test.sh new file mode 100755 index 0000000000000000000000000000000000000000..aaad42f201f8adfc7f646f92c17bd665f7dd33e9 --- /dev/null +++ b/src/tests/ofc25/run_test.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` +cd $PROJECTDIR/src +echo "Running OFC 25 tests from $PROJECTDIR/src ..." + +# Determine which test suite to run based on argument +TEST_SUITE=${1:-"all"} + +case "$TEST_SUITE" in + "all") + echo "=== Running Full Test Suite ===" + + echo "--- Running Optical Layer Initialization ---" + pytest --verbose tests/ofc25/tests/test_functional_bootstrap.py \ + --tfs-runtime-script=tfs_runtime_env_vars_opt.sh \ + --tfs-profile=opt \ + --tfs-topology-descriptor=topology_opt.json + echo "Waiting 5 seconds for initialization ..." + sleep 5 + + echo "--- Running IP/Packet Layer Initialization ---" + pytest --verbose tests/ofc25/tests/test_functional_bootstrap.py \ + --tfs-runtime-script=tfs_runtime_env_vars_ip.sh \ + --tfs-profile=ip \ + --tfs-topology-descriptor=topology_ip.json + echo "Waiting 5 seconds for initialization ..." + sleep 5 + + echo "--- Running E2E Layer Initialization ---" + pytest --verbose tests/ofc25/tests/test_functional_bootstrap.py \ + --tfs-runtime-script=tfs_runtime_env_vars_e2e.sh \ + --tfs-profile=e2e \ + --tfs-topology-descriptor=topology_e2e.json + echo "Waiting 5 seconds for initialization ..." 
+ sleep 5 + + echo "--- Running Service Creation/Deletion ---" + pytest --verbose tests/ofc25/tests/test_functional_create_vlinks.py + sleep 5 + pytest --verbose tests/ofc25/tests/test_functional_delete_vlinks.py + + echo "--- Running Cleanup In Reverse Order ---" + pytest --verbose tests/ofc25/tests/test_functional_cleanup.py \ + --tfs-runtime-script=tfs_runtime_env_vars_e2e.sh \ + --tfs-profile=e2e \ + --tfs-topology-descriptor=topology_e2e.json + pytest --verbose tests/ofc25/tests/test_functional_cleanup.py \ + --tfs-runtime-script=tfs_runtime_env_vars_ip.sh \ + --tfs-profile=ip \ + --tfs-topology-descriptor=topology_ip.json + pytest --verbose tests/ofc25/tests/test_functional_cleanup.py \ + --tfs-runtime-script=tfs_runtime_env_vars_opt.sh \ + --tfs-profile=opt \ + --tfs-topology-descriptor=topology_opt.json + ;; + + "init_opt") + echo "=== Running Optical Layer Initialization ===" + pytest --verbose tests/ofc25/tests/test_functional_cleanup.py \ + --tfs-runtime-script=tfs_runtime_env_vars_opt.sh \ + --tfs-profile=opt \ + --tfs-topology-descriptor=topology_opt.json + pytest --verbose tests/ofc25/tests/test_functional_bootstrap.py \ + --tfs-runtime-script=tfs_runtime_env_vars_opt.sh \ + --tfs-profile=opt \ + --tfs-topology-descriptor=topology_opt.json + ;; + + "init_ip") + echo "=== Running IP/Packet Layer Initialization ===" + pytest --verbose tests/ofc25/tests/test_functional_cleanup.py \ + --tfs-runtime-script=tfs_runtime_env_vars_ip.sh \ + --tfs-profile=ip \ + --tfs-topology-descriptor=topology_ip.json + pytest --verbose tests/ofc25/tests/test_functional_bootstrap.py \ + --tfs-runtime-script=tfs_runtime_env_vars_ip.sh \ + --tfs-profile=ip \ + --tfs-topology-descriptor=topology_ip.json + ;; + + "init_e2e") + echo "=== Running E2E Layer Initialization ===" + pytest --verbose tests/ofc25/tests/test_functional_cleanup.py \ + --tfs-runtime-script=tfs_runtime_env_vars_e2e.sh \ + --tfs-profile=e2e \ + --tfs-topology-descriptor=topology_e2e.json + pytest --verbose tests/ofc25/tests/test_functional_bootstrap.py \ + --tfs-runtime-script=tfs_runtime_env_vars_e2e.sh \ + --tfs-profile=e2e \ + --tfs-topology-descriptor=topology_e2e.json + ;; + + "service") + echo "=== Running Service Creation/Deletion ===" + pytest --verbose tests/ofc25/tests/test_functional_create_vlinks.py + sleep 5 + pytest --verbose tests/ofc25/tests/test_functional_delete_vlinks.py + ;; + + *) + echo "Usage: $0 [init_opt|init_ip|init_e2e|service|all]" + echo " init_opt - Run optical layer initialization only" + echo " init_ip - Run IP/packet layer initialization only" + echo " init_e2e - Run E2E orchestration layer initialization only" + echo " service - Run service creation/deletion only" + echo " all - Run complete test suite (default)" + exit 1 + ;; +esac diff --git a/src/tests/ofc25/tests/Fixtures.py b/src/tests/ofc25/tests/Fixtures.py index 69e4cbaa96d4b613088730eb6bca5c50770d17c6..e798a0fefa45477c2548e34df1f7c781e726a549 100644 --- a/src/tests/ofc25/tests/Fixtures.py +++ b/src/tests/ofc25/tests/Fixtures.py @@ -12,28 +12,143 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import re
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Dict, Mapping, Optional
+
 import pytest
+
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
+from vnt_manager.client.VNTManagerClient import VNTManagerClient
 
+PROFILE_OPT = 'opt'
+PROFILE_IP = 'ip'
+PROFILE_E2E = 'e2e'
 
-@pytest.fixture(scope='session')
-def context_client() -> ContextClient:
-    _client = ContextClient()
-    yield _client
-    _client.close()
+RUNTIME_ENV_DIR = Path('/var/teraflow')
+PROFILE_FILENAMES = {
+    PROFILE_OPT: 'tfs_runtime_env_vars_opt.sh',
+    PROFILE_IP: 'tfs_runtime_env_vars_ip.sh',
+    PROFILE_E2E: 'tfs_runtime_env_vars_e2e.sh',
+}
+EXPORT_REGEX = re.compile(r'^export\s+([A-Za-z_][A-Za-z0-9_]*)=(.*)$')
+
+
+@dataclass(frozen=True)
+class ServiceEndpoint:
+    host: str
+    port: int
+
+
+@dataclass(frozen=True)
+class TfsProfile:
+    name: str
+    env_vars: Mapping[str, str]
+    context: ServiceEndpoint
+    device: ServiceEndpoint
+    service: ServiceEndpoint
+    vnt_manager: Optional[ServiceEndpoint]
+
+
+@dataclass(frozen=True)
+class TfsClientBundle:
+    context: ContextClient
+    device: DeviceClient
+    service: ServiceClient
+    vnt_manager: Optional[VNTManagerClient]
+    env_vars: Mapping[str, str]
+
+
+def _parse_runtime_env_file(filepath: Path) -> Dict[str, str]:
+    env_vars: Dict[str, str] = {}
+    for raw_line in filepath.read_text(encoding='utf-8').splitlines():
+        line = raw_line.strip()
+        if not line or line.startswith('#'):
+            continue
+        match = EXPORT_REGEX.match(line)
+        if match is None:
+            continue
+        key, value = match.groups()
+        value = value.strip()
+        if (value.startswith('"') and value.endswith('"')) or \
+           (value.startswith("'") and value.endswith("'")):
+            value = value[1:-1]
+        env_vars[key] = value
+    return env_vars
+
+
+def _read_service_endpoint(env_vars: Mapping[str, str], service_name: str) -> ServiceEndpoint:
+    host_key = '{:s}_SERVICE_HOST'.format(service_name)
+    port_key = '{:s}_SERVICE_PORT_GRPC'.format(service_name)
+
+    if host_key not in env_vars:
+        raise KeyError('Missing key "{:s}" in runtime env vars'.format(host_key))
+    if port_key not in env_vars:
+        raise KeyError('Missing key "{:s}" in runtime env vars'.format(port_key))
+
+    return ServiceEndpoint(host=env_vars[host_key], port=int(env_vars[port_key]))
+
+
+def _read_optional_service_endpoint(env_vars: Mapping[str, str], service_name: str) -> Optional[ServiceEndpoint]:
+    host_key = '{:s}_SERVICE_HOST'.format(service_name)
+    port_key = '{:s}_SERVICE_PORT_GRPC'.format(service_name)
+    if host_key not in env_vars or port_key not in env_vars:
+        return None
+    return ServiceEndpoint(host=env_vars[host_key], port=int(env_vars[port_key]))
+
+
+def _load_tfs_profile(profile_name: str) -> TfsProfile:
+    filepath = RUNTIME_ENV_DIR / PROFILE_FILENAMES[profile_name]
+    if not filepath.exists():
+        raise FileNotFoundError('Runtime env file not found: {:s}'.format(str(filepath)))
+
+    env_vars = _parse_runtime_env_file(filepath)
+    return TfsProfile(
+        name=profile_name,
+        env_vars=env_vars,
+        context=_read_service_endpoint(env_vars, 'CONTEXTSERVICE'),
+        device=_read_service_endpoint(env_vars, 'DEVICESERVICE'),
+        service=_read_service_endpoint(env_vars, 'SERVICESERVICE'),
+        vnt_manager=_read_optional_service_endpoint(env_vars, 'VNT_MANAGERSERVICE'),
+    )
+
 
 @pytest.fixture(scope='session')
-def device_client() -> DeviceClient:
-    _client = DeviceClient()
-    yield _client
-    _client.close()
+def tfs_profiles() -> Dict[str, TfsProfile]:
+    profiles: Dict[str, TfsProfile] = {}
+    for profile_name in [PROFILE_OPT, PROFILE_IP, PROFILE_E2E]:
+        filepath = RUNTIME_ENV_DIR / PROFILE_FILENAMES[profile_name]
+        if not filepath.exists():
+            continue
+        profiles[profile_name] = _load_tfs_profile(profile_name)
+    if len(profiles) == 0:
+        raise FileNotFoundError('No runtime env files found in {:s}'.format(str(RUNTIME_ENV_DIR)))
+    return profiles
+
 
 @pytest.fixture(scope='session')
-def service_client() -> ServiceClient:
-    _client = ServiceClient()
-    yield _client
-    _client.close()
+def tfs_clients(tfs_profiles: Dict[str, TfsProfile]) -> Dict[str, TfsClientBundle]:
+    clients: Dict[str, TfsClientBundle] = {}
+    for profile_name, profile in tfs_profiles.items():
+        clients[profile_name] = TfsClientBundle(
+            context=ContextClient(profile.context.host, profile.context.port),
+            device=DeviceClient(profile.device.host, profile.device.port),
+            service=ServiceClient(profile.service.host, profile.service.port),
+            vnt_manager=(
+                VNTManagerClient(profile.vnt_manager.host, profile.vnt_manager.port)
+                if profile.vnt_manager is not None else None
+            ),
+            env_vars=profile.env_vars,
+        )
+
+    yield clients
+
+    for bundle in clients.values():
+        bundle.context.close()
+        bundle.device.close()
+        bundle.service.close()
+        if bundle.vnt_manager is not None:
+            bundle.vnt_manager.close()
diff --git a/src/tests/ofc25/tests/Helper.py b/src/tests/ofc25/tests/Helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..99f51849877812306a3834aae54e0937067b56a2
--- /dev/null
+++ b/src/tests/ofc25/tests/Helper.py
@@ -0,0 +1,400 @@
+# Copyright 2022-2026 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
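+
+# Shared helpers for the OFC25 multi-controller tests: device/link inventory
+# logging, descriptor-state validation, and a retry loop that waits until the
+# IP, E2E and optical controllers converge to an expected state. A rough
+# sketch of the intended call pattern (hypothetical values; per the descriptor
+# list below, only the first virtual link holds two optical connections):
+#
+#     wait_for_state_or_raise(
+#         ip_context_client=ip_ctx, e2e_context_client=e2e_ctx, opt_context_client=opt_ctx,
+#         expected_virtual_link_uuids=None,
+#         expected_virtual_link_names={'IP1/PORT-xe1==IP2/PORT-xe1'},
+#         expected_e2e_services=1, expected_opt_services=1,
+#         expected_opt_connections={'IP1/PORT-xe1==IP2/PORT-xe1': 2},
+#     )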
+
+import logging
+import os
+import time
+from typing import Dict, List, Optional, Set, Tuple
+
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, Device, Empty, Link, LinkTypeEnum, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_list_to_json_string, grpc_message_to_json, grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+VIRTUAL_LINK_DESCRIPTORS = [
+    ('virtual_link_01.json', 'IP1/PORT-xe1==IP2/PORT-xe1'),
+    ('virtual_link_02.json', 'IP1/PORT-xe2==IP2/PORT-xe2'),
+    ('virtual_link_03.json', 'IP1/PORT-xe3==IP2/PORT-xe3'),
+]
+DESCRIPTORS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors')
+
+
+def is_imported_device(device: Device) -> bool:
+    return device.HasField('controller_id') and bool(device.controller_id.device_uuid.uuid)
+
+
+def split_imported_devices(devices: List[Device]) -> Tuple[List[Device], List[Device]]:
+    imported_devices = [device for device in devices if is_imported_device(device)]
+    local_devices = [device for device in devices if not is_imported_device(device)]
+    return local_devices, imported_devices
+
+
+def log_device_inventory(context_client, profile_name: str, log_prefix: str = 'Device inventory') -> Tuple[List[Device], List[Device]]:
+    response = context_client.ListDevices(Empty())
+    local_devices, imported_devices = split_imported_devices(response.devices)
+
+    LOGGER.info(
+        '[%s] %s: total=%d local=%d imported=%d',
+        profile_name,
+        log_prefix,
+        len(response.devices),
+        len(local_devices),
+        len(imported_devices),
+    )
+    LOGGER.info('[%s] Local devices: %s', profile_name, grpc_message_list_to_json_string(local_devices))
+    LOGGER.info('[%s] Imported devices: %s', profile_name, grpc_message_list_to_json_string(imported_devices))
+    return local_devices, imported_devices
+
+
+def split_descriptor_links(
+    links: List[Link], descriptor_link_aliases: Set[str]
+) -> Tuple[List[Link], List[Link], List[Link]]:
+    descriptor_links = []
+    management_links = []
+    imported_links = []
+
+    for link in links:
+        runtime_aliases = {link.link_id.link_uuid.uuid}
+        if link.name:
+            runtime_aliases.add(link.name)
+
+        if runtime_aliases.intersection(descriptor_link_aliases):
+            descriptor_links.append(link)
+        elif _is_management_link(link):
+            management_links.append(link)
+        else:
+            imported_links.append(link)
+
+    return descriptor_links, management_links, imported_links
+
+
+def log_link_inventory(context_client, descriptor_loader, profile_name: str) -> Tuple[List[Link], List[Link], List[Link]]:
+    response = context_client.ListLinks(Empty())
+    descriptor_link_aliases = set()
+    for link in descriptor_loader.links:
+        link_uuid = link.get('link_id', {}).get('link_uuid', {}).get('uuid')
+        if link_uuid:
+            descriptor_link_aliases.add(link_uuid)
+        link_name = link.get('name')
+        if link_name:
+            descriptor_link_aliases.add(link_name)
+
+    descriptor_links, management_links, imported_links = split_descriptor_links(
+        response.links, descriptor_link_aliases
+    )
+
+    LOGGER.info(
+        '[%s] Descriptor validation link inventory: total=%d descriptor=%d management=%d imported=%d',
+        profile_name,
+        len(response.links),
+        len(descriptor_links),
+        len(management_links),
+        len(imported_links),
+    )
+    LOGGER.info('[%s] Descriptor links: %s', profile_name, grpc_message_list_to_json_string(descriptor_links))
+    LOGGER.info('[%s] Management links: %s', profile_name, grpc_message_list_to_json_string(management_links))
+    LOGGER.info('[%s] Imported links: %s', profile_name, grpc_message_list_to_json_string(imported_links))
+    return descriptor_links, management_links, imported_links
+
+
+def _is_management_link(link: Link) -> bool:
+    if link.link_type == LinkTypeEnum.LINKTYPE_MANAGEMENT:
+        return True
+
+    if 'mgmt' in link.name.lower():
+        return True
+
+    for endpoint_id in link.link_endpoint_ids:
+        if 'mgmt' in endpoint_id.endpoint_uuid.uuid.lower():
+            return True
+
+    return False
+
+
+def validate_descriptor_state(context_client, descriptor_loader, profile_name: str) -> None:
+    contexts = context_client.ListContexts(Empty())
+    assert len(contexts.contexts) == descriptor_loader.num_contexts
+
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
+
+    local_devices, imported_devices = log_device_inventory(
+        context_client, profile_name, log_prefix='Descriptor validation device inventory'
+    )
+    assert len(local_devices) == descriptor_loader.num_devices
+    if imported_devices:
+        LOGGER.info(
+            '[%s] Ignoring %d imported devices for descriptor validation because they are learned via controllers',
+            profile_name,
+            len(imported_devices),
+        )
+
+    descriptor_links, management_links, imported_links = log_link_inventory(
+        context_client, descriptor_loader, profile_name
+    )
+    assert len(descriptor_links) == descriptor_loader.num_links
+    if management_links:
+        LOGGER.info(
+            '[%s] Found %d management links auto-added during device import',
+            profile_name,
+            len(management_links),
+        )
+    if imported_links:
+        LOGGER.info(
+            '[%s] Ignoring %d imported non-descriptor links during descriptor validation',
+            profile_name,
+            len(imported_links),
+        )
+
+    response = context_client.GetOpticalLinkList(Empty())
+    assert len(response.optical_links) == descriptor_loader.num_optical_links
+
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.services) == num_services
+
+    for context_uuid, num_slices in descriptor_loader.num_slices.items():
+        response = context_client.ListSlices(ContextId(**json_context_id(context_uuid)))
+        assert len(response.slices) == num_slices
+
+
+def list_active_optical_services(context_client) -> List:
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.info('Services[%d] = %s', len(response.services), grpc_message_to_json_string(response))
+
+    active_optical_services = []
+    for service in response.services:
+        if service.service_type != ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
+            continue
+        if service.service_status.service_status != ServiceStatusEnum.SERVICESTATUS_ACTIVE:
+            continue
+        active_optical_services.append(service)
+    return active_optical_services
+
+
+def count_service_connections(context_client, service) -> int:
+    response = context_client.ListConnections(service.service_id)
+    LOGGER.info(
+        'ServiceId[%s] => Connections[%d] = %s',
+        grpc_message_to_json_string(service.service_id),
+        len(response.connections),
+        grpc_message_to_json_string(response),
+    )
+    return len(response.connections)
+
+
+def describe_services(context_client, profile_name: str) -> str:
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services = []
+    for service in response.services:
+        # Annotate each service with a readable status name and, when reachable,
+        # its connection count; failures are recorded instead of raised so the
+        # snapshot remains useful while the controllers are still converging.
+        service_json = grpc_message_to_json(service)
+        status_value = service.service_status.service_status
+        service_json['service_status_name'] = ServiceStatusEnum.Name(status_value)
+        try:
+            service_json['num_connections'] = count_service_connections(context_client, service)
+        except Exception as exc:  # pylint: disable=broad-except
+            service_json['num_connections_error'] = str(exc)
+        services.append(service_json)
+    LOGGER.info('[%s] Service snapshot: %s', profile_name, str(services))
+    return str(services)
+
+
+def get_virtual_link_identifiers(context_client) -> Tuple[Set[str], Set[str]]:
+    response = context_client.ListLinks(Empty())
+    virtual_link_uuids = set()
+    virtual_link_names = set()
+    for link in response.links:
+        if link.link_type != LinkTypeEnum.LINKTYPE_VIRTUAL:
+            continue
+        virtual_link_uuids.add(link.link_id.link_uuid.uuid)
+        if len(link.name) > 0:
+            virtual_link_names.add(link.name)
+
+    LOGGER.info('VirtualLinkNames[%d] = %s', len(virtual_link_names), str(sorted(virtual_link_names)))
+    LOGGER.info('VirtualLinkUuids[%d] = %s', len(virtual_link_uuids), str(sorted(virtual_link_uuids)))
+    return virtual_link_uuids, virtual_link_names
+
+
+def describe_links(context_client, profile_name: str) -> str:
+    response = context_client.ListLinks(Empty())
+    links = []
+    for link in response.links:
+        link_json = grpc_message_to_json(link)
+        link_json['link_type_name'] = LinkTypeEnum.Name(link.link_type)
+        links.append(link_json)
+    LOGGER.info('[%s] Link snapshot: %s', profile_name, str(links))
+    return str(links)
+
+
+def log_global_state(ip_context_client, e2e_context_client, opt_context_client) -> None:
+    describe_links(ip_context_client, 'ip')
+    describe_services(ip_context_client, 'ip')
+    describe_services(e2e_context_client, 'e2e')
+    describe_services(opt_context_client, 'opt')
+
+
+def assert_expected_set(actual_items: Set[str], expected_items: Optional[Set[str]], label: str) -> None:
+    if expected_items is None:
+        return
+
+    assert actual_items == expected_items, (
+        '{:s} mismatch: expected={:s} actual={:s}'.format(
+            label, str(sorted(expected_items)), str(sorted(actual_items))
+        )
+    )
+
+
+def get_service_identifiers(service) -> Set[str]:
+    identifiers = {service.service_id.service_uuid.uuid}
+    if len(service.name) > 0:
+        identifiers.add(service.name)
+    return identifiers
+
+
+def build_expected_optical_connections(expected_virtual_link_names: Set[str]) -> Dict[str, int]:
+    expected_connections = dict()
+    first_optical_service_name = VIRTUAL_LINK_DESCRIPTORS[0][1]
+
+    for _, virtual_link_name in VIRTUAL_LINK_DESCRIPTORS:
+        if virtual_link_name not in expected_virtual_link_names:
+            continue
+        expected_connections[virtual_link_name] = 2 if virtual_link_name == first_optical_service_name else 1
+
+    return expected_connections
+
+
+def assert_global_state(
+    ip_context_client,
+    e2e_context_client,
+    opt_context_client,
+    expected_virtual_link_uuids: Optional[Set[str]],
+    expected_virtual_link_names: Optional[Set[str]],
+    expected_e2e_services: int,
+    expected_opt_services: int,
+    expected_opt_connections: Optional[Dict[str, int]],
+) -> None:
+    response = ip_context_client.ListServices(ADMIN_CONTEXT_ID)
+    assert len(response.services) == 0
+
+    virtual_link_uuids, virtual_link_names = get_virtual_link_identifiers(ip_context_client)
+    assert_expected_set(virtual_link_uuids, expected_virtual_link_uuids, 'Virtual link UUIDs')
+    assert_expected_set(virtual_link_names, expected_virtual_link_names, 'Virtual link names')
+
+    e2e_services = list_active_optical_services(e2e_context_client)
+    if expected_e2e_services == 0:
+        response = e2e_context_client.ListServices(ADMIN_CONTEXT_ID)
+        assert len(response.services) == 0
+    else:
+        assert len(e2e_services) == expected_e2e_services
+        for service in e2e_services:
+            assert count_service_connections(e2e_context_client, service) == 1
+
+    opt_services = list_active_optical_services(opt_context_client)
+    if expected_opt_services == 0:
+        response = opt_context_client.ListServices(ADMIN_CONTEXT_ID)
+        assert len(response.services) == 0
+    else:
+        assert len(opt_services) == expected_opt_services
+
+    if expected_opt_connections is not None:
+        unmatched_expected = dict(expected_opt_connections)
+        for service in opt_services:
+            service_identifiers = get_service_identifiers(service)
+            matching_identifiers = [
+                identifier for identifier in service_identifiers if identifier in unmatched_expected
+            ]
+            assert len(matching_identifiers) == 1, (
+                'Unable to match optical service identifiers={:s} against expected={:s}'.format(
+                    str(sorted(service_identifiers)), str(sorted(unmatched_expected.keys()))
+                )
+            )
+
+            service_identifier = matching_identifiers[0]
+            actual_connections = count_service_connections(opt_context_client, service)
+            expected_connections = unmatched_expected.pop(service_identifier)
+            assert actual_connections == expected_connections, (
+                'Optical service {:s} connections mismatch: expected={:d} actual={:d}'.format(
+                    service_identifier, expected_connections, actual_connections
+                )
+            )
+
+        assert len(unmatched_expected) == 0, (
+            'Missing optical services for expected connection checks: {:s}'.format(
+                str(sorted(unmatched_expected.keys()))
+            )
+        )
+
+
+def wait_for_state_or_raise(
+    ip_context_client,
+    e2e_context_client,
+    opt_context_client,
+    expected_virtual_link_uuids: Optional[Set[str]],
+    expected_virtual_link_names: Optional[Set[str]],
+    expected_e2e_services: int,
+    expected_opt_services: int,
+    expected_opt_connections: Optional[Dict[str, int]],
+    max_retry: int = 5,
+    wait_seconds: float = 15.0,
+) -> None:
+    last_error: Exception = Exception('state not reached')
+    for attempt in range(1, max_retry + 1):
+        try:
+            LOGGER.info(
+                'Checking expected state attempt %d/%d: virtual_link_uuids=%s virtual_link_names=%s '
+                'e2e_services=%d opt_services=%d opt_connections=%s',
+                attempt,
+                max_retry,
+                '' if expected_virtual_link_uuids is None else str(sorted(expected_virtual_link_uuids)),
+                '' if expected_virtual_link_names is None else str(sorted(expected_virtual_link_names)),
+                expected_e2e_services,
+                expected_opt_services,
+                expected_opt_connections,
+            )
+            assert_global_state(
+                ip_context_client=ip_context_client,
+                e2e_context_client=e2e_context_client,
+                opt_context_client=opt_context_client,
+                expected_virtual_link_uuids=expected_virtual_link_uuids,
+                expected_virtual_link_names=expected_virtual_link_names,
+                expected_e2e_services=expected_e2e_services,
+                expected_opt_services=expected_opt_services,
+                expected_opt_connections=expected_opt_connections,
+            )
+            return
+        except Exception as error:  # pylint: disable=broad-except
+            last_error = error
+            LOGGER.warning(
+                'Expected state not reached on attempt %d/%d: %s',
+                attempt, max_retry, str(error)
+            )
+            log_global_state(ip_context_client, e2e_context_client, opt_context_client)
+            time.sleep(wait_seconds)
+
+    LOGGER.error(
+        'Timed out waiting for expected state: virtual_link_uuids=%s virtual_link_names=%s '
+        'e2e_services=%d opt_services=%d opt_connections=%s',
+        '' if expected_virtual_link_uuids is None else str(sorted(expected_virtual_link_uuids)),
+        '' if expected_virtual_link_names is None else str(sorted(expected_virtual_link_names)),
+        expected_e2e_services,
+        expected_opt_services,
+        expected_opt_connections,
+    )
+    raise last_error
diff --git a/src/tests/ofc25/tests/conftest.py b/src/tests/ofc25/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1a2e25fa9d9a0ac8752436c84bca56c06c9a5b4
--- /dev/null
+++ b/src/tests/ofc25/tests/conftest.py
@@ -0,0 +1,131 @@
+# Copyright 2022-2026 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import tempfile
+from pathlib import Path
+
+import pytest
+
+# pylint: disable=unused-import
+from .Fixtures import (
+    PROFILE_FILENAMES, PROFILE_E2E, PROFILE_IP, PROFILE_OPT,
+    RUNTIME_ENV_DIR, tfs_clients, tfs_profiles
+)
+
+E2E_TOPOLOGY_DESCRIPTOR = 'topology_e2e.json'
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        '--tfs-profile',
+        action='store',
+        choices=[PROFILE_OPT, PROFILE_IP, PROFILE_E2E],
+        default=None,
+        help='TFS deployment profile to use (opt|ip|e2e).',
+    )
+    parser.addoption(
+        '--tfs-runtime-script',
+        action='store',
+        default=None,
+        help='Runtime env script filename under /var/teraflow.',
+    )
+    parser.addoption(
+        '--tfs-topology-descriptor',
+        action='store',
+        default=None,
+        help='Topology descriptor filename (or absolute path).',
+    )
+
+
+def _require_option(request: pytest.FixtureRequest, option_name: str) -> str:
+    value = request.config.getoption(option_name)
+    if value is None:
+        raise ValueError('Missing required pytest option: --{:s}'.format(option_name.replace('_', '-')))
+    return value
+
+
+def _require_runner_ip() -> str:
+    runner_ip = os.environ.get('TFS_RUNNER_IP')
+    if runner_ip:
+        return runner_ip
+    raise RuntimeError(
+        'Missing TFS_RUNNER_IP environment variable required to materialize the OFC25 E2E topology descriptor'
+    )
+
+
+def _materialize_e2e_descriptor(descriptor_path: Path) -> str:
+    runner_ip = _require_runner_ip()
+    descriptor_data = json.loads(descriptor_path.read_text(encoding='utf-8'))
+
+    # Point every device '_connect/address' config rule at the test runner IP.
+    for device in descriptor_data.get('devices', []):
+        config_rules = device.get('device_config', {}).get('config_rules', [])
+        for config_rule in config_rules:
+            custom = config_rule.get('custom', {})
+            if custom.get('resource_key') == '_connect/address':
+                custom['resource_value'] = runner_ip
+
+    tmp_file = tempfile.NamedTemporaryFile(
+        mode='w', suffix='-ofc25-topology-e2e.json', prefix='tfs-', delete=False, encoding='utf-8'
+    )
+    with tmp_file:
+        json.dump(descriptor_data, tmp_file, indent=4)
+        tmp_file.write('\n')
+
+    return tmp_file.name
+
+
+@pytest.fixture(scope='session')
+def selected_tfs_profile(request: pytest.FixtureRequest) -> str:
+    return _require_option(request, 'tfs_profile')
+
+
+@pytest.fixture(scope='session')
+def selected_runtime_script(request: pytest.FixtureRequest, selected_tfs_profile: str) -> str:
+    runtime_script = _require_option(request, 'tfs_runtime_script')
+    expected_script = PROFILE_FILENAMES[selected_tfs_profile]
+    if runtime_script != expected_script:
+        msg = 'Runtime script "{:s}" does not match profile "{:s}" (expected "{:s}")'
+        raise ValueError(msg.format(runtime_script, selected_tfs_profile, expected_script))
+    runtime_file = RUNTIME_ENV_DIR / runtime_script
+    if not runtime_file.exists():
+        raise FileNotFoundError('Runtime env file not found: {:s}'.format(str(runtime_file)))
+    return runtime_script
+
+
+@pytest.fixture(scope='session')
+def selected_topology_descriptor(request: pytest.FixtureRequest) -> str:
+    descriptor = _require_option(request, 'tfs_topology_descriptor')
+    descriptor_path = Path(descriptor)
+    if not descriptor_path.is_absolute():
+        descriptor_path = Path(__file__).resolve().parent.parent / 'descriptors' / descriptor
+    if not descriptor_path.exists():
+        raise FileNotFoundError('Topology descriptor not found: {:s}'.format(str(descriptor_path)))
+
+    if descriptor_path.name == E2E_TOPOLOGY_DESCRIPTOR:
+        return _materialize_e2e_descriptor(descriptor_path)
+
+    return str(descriptor_path)
+
+
+@pytest.fixture(scope='session')
+def selected_tfs_client_bundle(
+    tfs_clients,
+    selected_tfs_profile: str,
+    selected_runtime_script: str,  # pylint: disable=unused-argument
+):
+    if selected_tfs_profile not in tfs_clients:
+        raise KeyError('Profile "{:s}" not loaded in tfs_clients'.format(selected_tfs_profile))
+    return tfs_clients[selected_tfs_profile]
diff --git a/src/tests/ofc25/tests/create_service.py b/src/tests/ofc25/tests/create_service.py
deleted file mode 100644
index 9e1fa73b065bfbdc295d11281657413260f138be..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/tests/create_service.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import requests
-
-url = "http://localhost:8002/tfs-api/link/CSGW1_xe5-CSGW2_xe5"
-
-payload = {
-    "link_id": {"link_uuid": {"uuid": "CSGW1_xe5-CSGW2_xe5"}},
-    "link_endpoint_ids": [
-        {
-            "device_id": {"device_uuid": {"uuid": "CSGW1"}},
-            "endpoint_uuid": {"uuid": "PORT-xe5"}
-        },
-        {
-            "device_id": {"device_uuid": {"uuid": "CSGW2"}},
-            "endpoint_uuid": {"uuid": "PORT-xe5"}
-        }
-    ],
-    "link_type": "LINKTYPE_VIRTUAL_COPPER",
-    "attributes": {"total_capacity_gbps": 1}
-}
-headers = {
-    "cookie": "session%3Aaa82129ced5debbb=eyJjc3JmX3Rva2VuIjoiZGI1ZjY5Yjg0MDgxMjk3YmU3ZTY2MDMxZTljYzdiYTZmMWVjZjc0NCJ9.ZijdlQ.xdcOryRyoRgXCJ2XYwczsHw4yIQ; session%3A53cf1bf28136ee51=eyJjc3JmX3Rva2VuIjoiMDFlNWQwNzUyNDM3MDU1NWZhZjE3MGFiYzg4NGY2YmE3Y2M5MjM4OCJ9.ZikGzQ.KkIdiPPvqaO2pyBi7-mnlTKnmWs; session%3Ae52730446648c30a=eyJjc3JmX3Rva2VuIjoiODI4NTUwOTc4MWMxYzVjNmQ2ZDBiNGViMmU4ZDJmMzYzMzUxY2M2OSJ9.ZyOPuA.LWyhgLGjWOCb1H6wKlsG0evCV-A",
-    "Content-Type": "application/json"
-}
-
-response = requests.request("PUT", url, json=payload, headers=headers)
-
-print(response.text)
\ No newline at end of file
diff --git a/src/tests/ofc25/tests/delete_service.py b/src/tests/ofc25/tests/delete_service.py
deleted file mode 100644
index 2ffaf16a2ef4487918d1124d56150871fcc2cf58..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/tests/delete_service.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import requests
-
-url = "http://localhost:8002/tfs-api/link/CSGW1_xe5-CSGW2_xe5"
-
-payload = ""
-headers = {"cookie": "session%3Aaa82129ced5debbb=eyJjc3JmX3Rva2VuIjoiZGI1ZjY5Yjg0MDgxMjk3YmU3ZTY2MDMxZTljYzdiYTZmMWVjZjc0NCJ9.ZijdlQ.xdcOryRyoRgXCJ2XYwczsHw4yIQ; session%3A53cf1bf28136ee51=eyJjc3JmX3Rva2VuIjoiMDFlNWQwNzUyNDM3MDU1NWZhZjE3MGFiYzg4NGY2YmE3Y2M5MjM4OCJ9.ZikGzQ.KkIdiPPvqaO2pyBi7-mnlTKnmWs; session%3Ae52730446648c30a=eyJjc3JmX3Rva2VuIjoiODI4NTUwOTc4MWMxYzVjNmQ2ZDBiNGViMmU4ZDJmMzYzMzUxY2M2OSJ9.ZyOPuA.LWyhgLGjWOCb1H6wKlsG0evCV-A"}
-
-response = requests.request("DELETE", url, data=payload, headers=headers)
-
-print(response.text)
\ No newline at end of file
diff --git a/src/tests/ofc25/tests/test_functional_bootstrap.py b/src/tests/ofc25/tests/test_functional_bootstrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..66296110d764b168fc3d1c9f96970f0755339ac3
--- /dev/null
+++ b/src/tests/ofc25/tests/test_functional_bootstrap.py
@@ -0,0 +1,104 @@
+# Copyright 2022-2026 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.grpc.Tools import grpc_message_list_to_json_string, grpc_message_to_json, grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+
+# pylint: disable=unused-import
+from .conftest import (
+    selected_tfs_client_bundle, selected_tfs_profile, selected_topology_descriptor
+)
+from .Helper import split_imported_devices, validate_descriptor_state
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+
+def _check_devices_enabled_or_raise(context_client, profile_name: str, max_retry: int = 10, wait_seconds: float = 1.0) -> None:
+    op_status_enabled = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+    num_devices = -1
+    num_devices_enabled = 0
+    num_retry = 0
+    disabled_devices = list()
+
+    while (num_retry < max_retry) and (num_devices != num_devices_enabled):
+        time.sleep(wait_seconds)
+        response = context_client.ListDevices(Empty())
+        num_devices = len(response.devices)
+        local_devices, imported_devices = split_imported_devices(response.devices)
+        num_devices_enabled = 0
+        disabled_devices = list()
+        for device in response.devices:
+            if device.device_operational_status == op_status_enabled:
+                num_devices_enabled += 1
+            else:
+                disabled_devices.append(grpc_message_to_json(device))
+        LOGGER.info(
+            '[%s] Num Devices enabled: %d/%d (local=%d imported=%d)',
+            profile_name,
+            num_devices_enabled,
+            num_devices,
+            len(local_devices),
+            len(imported_devices),
+        )
+        LOGGER.info('[%s] Local devices: %s', profile_name, grpc_message_list_to_json_string(local_devices))
+        LOGGER.info('[%s] Imported devices: %s', profile_name, grpc_message_list_to_json_string(imported_devices))
+        num_retry += 1
+
+    if num_devices_enabled != num_devices:
+        msg = '[{:s}] Devices enabled timeout after {:d} retries: {:d}/{:d}; disabled={:s}'
+        raise Exception(msg.format(profile_name, max_retry, num_devices_enabled, num_devices, str(disabled_devices)))
+
+    LOGGER.info('[%s] Devices: %s', profile_name, grpc_message_to_json_string(response))
+
+
+def test_scenario_bootstrap(
+    selected_tfs_client_bundle,
+    selected_tfs_profile: str,
+    selected_topology_descriptor: str,
+) -> None:
+    context_client = selected_tfs_client_bundle.context
+    device_client = selected_tfs_client_bundle.device
+
+    validate_empty_scenario(context_client)
+
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=selected_topology_descriptor,
+        context_client=context_client,
+        device_client=device_client,
+    )
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    validate_descriptor_state(context_client, descriptor_loader, selected_tfs_profile)
+
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+
+def test_scenario_devices_enabled(
+    selected_tfs_client_bundle,
+    selected_tfs_profile: str,
+) -> None:
+    context_client = selected_tfs_client_bundle.context
+    _check_devices_enabled_or_raise(context_client, selected_tfs_profile)
diff --git a/src/tests/ofc25/tests/test_functional_bootstrap_e2e.py b/src/tests/ofc25/tests/test_functional_bootstrap_e2e.py
deleted file mode 100644
index 23586b5536d86e84e2502134252f4a37c03640d5..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/tests/test_functional_bootstrap_e2e.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, os, time
-
-from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import (
-    ContextId, DeviceOperationalStatusEnum, Empty,
-)
-from common.tools.descriptor.Loader import (
-    DescriptorLoader, check_descriptor_load_results, validate_empty_scenario,
-)
-from common.tools.grpc.Tools import (
-    grpc_message_to_json, grpc_message_to_json_string,
-)
-from common.tools.object_factory.Context import json_context_id
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-
-from .Fixtures import (
-    context_client, device_client,
-)  # pylint: disable=unused-import
-
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_e2e.json')
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
-
-def test_scenario_bootstrap(
-    context_client : ContextClient,  # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,  # pylint: disable=redefined-outer-name
-) -> None:
-    validate_empty_scenario(context_client)
-
-    descriptor_loader = DescriptorLoader(
-        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
-    results = descriptor_loader.process()
-    check_descriptor_load_results(results, descriptor_loader)
-    descriptor_loader.validate()
-
-    # Verify the scenario has no services/slices
-    response = context_client.GetContext(ADMIN_CONTEXT_ID)
-    assert len(response.service_ids) == 0
-    assert len(response.slice_ids) == 0
-
-def test_scenario_devices_enabled(
-    context_client : ContextClient,  # pylint: disable=redefined-outer-name
-) -> None:
-    """
-    This test validates that the devices are enabled.
- """ - DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - - num_devices = -1 - num_devices_enabled, num_retry = 0, 0 - while (num_devices != num_devices_enabled) and (num_retry < 10): - time.sleep(1.0) - response = context_client.ListDevices(Empty()) - num_devices = len(response.devices) - num_devices_enabled = 0 - disabled_devices = list() - for device in response.devices: - if device.device_operational_status == DEVICE_OP_STATUS_ENABLED: - num_devices_enabled += 1 - else: - disabled_devices.append(grpc_message_to_json(device)) - LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) - num_retry += 1 - if num_devices_enabled != num_devices: - LOGGER.info('Disabled Devices: {:s}'.format(str(disabled_devices))) - LOGGER.info('Devices: {:s}'.format(grpc_message_to_json_string(response))) - assert num_devices_enabled == num_devices diff --git a/src/tests/ofc25/tests/test_functional_bootstrap_ip.py b/src/tests/ofc25/tests/test_functional_bootstrap_ip.py deleted file mode 100644 index 501904d1f8e1fc16e67acc11d528362cbb15ab10..0000000000000000000000000000000000000000 --- a/src/tests/ofc25/tests/test_functional_bootstrap_ip.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging, os, time - -from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ( - ContextId, DeviceOperationalStatusEnum, Empty, -) -from common.tools.descriptor.Loader import ( - DescriptorLoader, check_descriptor_load_results, validate_empty_scenario, -) -from common.tools.object_factory.Context import json_context_id -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient - -from .Fixtures import ( - context_client, device_client, -) # pylint: disable=unused-import - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_ip.json') -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) - -def test_scenario_bootstrap( - context_client : ContextClient, # pylint: disable=redefined-outer-name - device_client : DeviceClient, # pylint: disable=redefined-outer-name -) -> None: - validate_empty_scenario(context_client) - - descriptor_loader = DescriptorLoader( - descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) - results = descriptor_loader.process() - check_descriptor_load_results(results, descriptor_loader) - descriptor_loader.validate() - - # Verify the scenario has no services/slices - response = context_client.GetContext(ADMIN_CONTEXT_ID) - assert len(response.service_ids) == 0 - assert len(response.slice_ids) == 0 - -def test_scenario_devices_enabled( - context_client : ContextClient, # pylint: disable=redefined-outer-name -) -> None: - DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - - num_devices = -1 - num_devices_enabled, num_retry = 0, 0 - while (num_devices != num_devices_enabled) and (num_retry < 10): - time.sleep(1.0) - response = context_client.ListDevices(Empty()) - num_devices = len(response.devices) - num_devices_enabled = 0 - for device in response.devices: - if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue - num_devices_enabled += 1 - LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) - num_retry += 1 diff --git a/src/tests/ofc25/tests/test_functional_bootstrap_opt.py b/src/tests/ofc25/tests/test_functional_bootstrap_opt.py deleted file mode 100644 index 8a6b8ddb4c82f537c81a35f46c096a1e30330109..0000000000000000000000000000000000000000 --- a/src/tests/ofc25/tests/test_functional_bootstrap_opt.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging, os, time - -from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ( - ContextId, DeviceOperationalStatusEnum, Empty, -) -from common.tools.descriptor.Loader import ( - DescriptorLoader, check_descriptor_load_results, validate_empty_scenario, -) -from common.tools.object_factory.Context import json_context_id -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient - -from .Fixtures import ( - context_client, device_client, -) # pylint: disable=unused-import - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_opt.json') -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) - -def test_scenario_bootstrap( - context_client : ContextClient, # pylint: disable=redefined-outer-name - device_client : DeviceClient, # pylint: disable=redefined-outer-name -) -> None: - validate_empty_scenario(context_client) - - descriptor_loader = DescriptorLoader( - descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) - results = descriptor_loader.process() - check_descriptor_load_results(results, descriptor_loader) - descriptor_loader.validate() - - # Verify the scenario has no services/slices - response = context_client.GetContext(ADMIN_CONTEXT_ID) - assert len(response.service_ids) == 0 - assert len(response.slice_ids) == 0 - -def test_scenario_devices_enabled( - context_client : ContextClient, # pylint: disable=redefined-outer-name -) -> None: - """ - This test validates that the devices are enabled. - """ - """ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - - num_devices = -1 - num_devices_enabled, num_retry = 0, 0 - while (num_devices != num_devices_enabled) and (num_retry < 10): - time.sleep(1.0) - response = context_client.ListDevices(Empty()) - num_devices = len(response.devices) - num_devices_enabled = 0 - for device in response.devices: - if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue - num_devices_enabled += 1 - LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) - num_retry += 1 """ - assert 1 == 1 diff --git a/src/tests/ofc25/tests/test_functional_cleanup_ip.py b/src/tests/ofc25/tests/test_functional_cleanup.py similarity index 56% rename from src/tests/ofc25/tests/test_functional_cleanup_ip.py rename to src/tests/ofc25/tests/test_functional_cleanup.py index fb33a4d45826f73c3bf62b31dbd79a0f68f7c814..0b42903d3f17b4396b52b823cd877951102d758d 100644 --- a/src/tests/ofc25/tests/test_functional_cleanup_ip.py +++ b/src/tests/ofc25/tests/test_functional_cleanup.py @@ -1,4 +1,4 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# Copyright 2022-2026 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,39 +12,42 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 
-import logging, os
+import logging
 
 from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import ContextId
-from common.tools.descriptor.Loader import (
-    DescriptorLoader, validate_empty_scenario,
-)
+from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
 from common.tools.object_factory.Context import json_context_id
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
 
-from .Fixtures import (
-    context_client, device_client,
-)  # pylint: disable=unused-import
+# pylint: disable=unused-import
+from .conftest import (
+    selected_tfs_client_bundle, selected_tfs_profile, selected_topology_descriptor
+)
+from .Helper import validate_descriptor_state
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_ip.json')
 ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
+
 def test_scenario_cleanup(
-    context_client : ContextClient,  # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,  # pylint: disable=redefined-outer-name
+    selected_tfs_client_bundle,
+    selected_tfs_profile: str,
+    selected_topology_descriptor: str,
 ) -> None:
-    # Verify the scenario has no services/slices
+    context_client = selected_tfs_client_bundle.context
+    device_client = selected_tfs_client_bundle.device
+
     response = context_client.GetContext(ADMIN_CONTEXT_ID)
     assert len(response.service_ids) == 0
     assert len(response.slice_ids) == 0
 
-    # Load descriptors and validate the base scenario
     descriptor_loader = DescriptorLoader(
-        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
-    descriptor_loader.validate()
+        descriptors_file=selected_topology_descriptor,
+        context_client=context_client,
+        device_client=device_client,
+    )
+    validate_descriptor_state(context_client, descriptor_loader, selected_tfs_profile)
     descriptor_loader.unload()
     validate_empty_scenario(context_client)
diff --git a/src/tests/ofc25/tests/test_functional_cleanup_e2e.py b/src/tests/ofc25/tests/test_functional_cleanup_e2e.py
deleted file mode 100644
index 8f81d3cd9d559a044f52bee6d4e08325870d2bb6..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/tests/test_functional_cleanup_e2e.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, os
-
-from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId
-from common.tools.descriptor.Loader import (
-    DescriptorLoader, validate_empty_scenario,
-)
-from common.tools.object_factory.Context import json_context_id
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-
-from .Fixtures import (
-    context_client, device_client,
-)  # pylint: disable=unused-import
-
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_e2e.json')
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
-
-def test_scenario_cleanup(
-    context_client : ContextClient,  # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,  # pylint: disable=redefined-outer-name
-) -> None:
-    # Verify the scenario has no services/slices
-    response = context_client.GetContext(ADMIN_CONTEXT_ID)
-    assert len(response.service_ids) == 0
-    assert len(response.slice_ids) == 0
-
-    # Load descriptors and validate the base scenario
-    descriptor_loader = DescriptorLoader(
-        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
-    descriptor_loader.validate()
-    descriptor_loader.unload()
-    validate_empty_scenario(context_client)
diff --git a/src/tests/ofc25/tests/test_functional_cleanup_opt.py b/src/tests/ofc25/tests/test_functional_cleanup_opt.py
deleted file mode 100644
index 4e7a76d1cb7286ce1f0d73f1123d5eed67172e1d..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/tests/test_functional_cleanup_opt.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, os
-
-from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import ContextId
-from common.tools.descriptor.Loader import (
-    DescriptorLoader, validate_empty_scenario,
-)
-from common.tools.object_factory.Context import json_context_id
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
-
-from .Fixtures import (
-    context_client, device_client,
-)  # pylint: disable=unused-import
-
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_opt.json')
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
-
-def test_scenario_cleanup(
-    context_client : ContextClient,  # pylint: disable=redefined-outer-name
-    device_client : DeviceClient,  # pylint: disable=redefined-outer-name
-) -> None:
-    # Verify the scenario has no services/slices
-    response = context_client.GetContext(ADMIN_CONTEXT_ID)
-    assert len(response.service_ids) == 0
-    assert len(response.slice_ids) == 0
-
-    # Load descriptors and validate the base scenario
-    descriptor_loader = DescriptorLoader(
-        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
-    descriptor_loader.validate()
-    descriptor_loader.unload()
-    validate_empty_scenario(context_client)
diff --git a/src/tests/ofc25/tests/test_functional_create_service.py b/src/tests/ofc25/tests/test_functional_create_service.py
deleted file mode 100644
index b987bf8dedd354bee2cf8e6eb9a8ffdf52e9b0e9..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/tests/test_functional_create_service.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import logging, os - -from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ( - ContextId, ServiceStatusEnum, ServiceTypeEnum, -) -from common.tools.descriptor.Loader import ( - DescriptorLoader, check_descriptor_load_results, -) -from common.tools.grpc.Tools import grpc_message_to_json_string -from common.tools.object_factory.Context import json_context_id -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from service.client.ServiceClient import ServiceClient - -from .Fixtures import ( - context_client, device_client, service_client, -) # pylint: disable=unused-import - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'virtual_link.json') -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) - -def test_service_creation_bidir( - context_client : ContextClient, # pylint: disable=redefined-outer-name - # device_client : DeviceClient, # pylint: disable=redefined-outer-name - # service_client : ServiceClient, # pylint: disable=redefined-outer-name -): - # Load descriptors and validate the base scenario - # descriptor_loader = DescriptorLoader( - # descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client, - # service_client=service_client - # ) - # results = descriptor_loader.process() - # check_descriptor_load_results(results, descriptor_loader) - - import create_service - - # Verify the scenario has 1 service and 0 slices - response = context_client.GetContext(ADMIN_CONTEXT_ID) - assert len(response.service_ids) == 1 - assert len(response.slice_ids) == 0 - - # Check there are no slices - response = context_client.ListSlices(ADMIN_CONTEXT_ID) - LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) - assert len(response.slices) == 0 - - # Check there is 1 service - response = context_client.ListServices(ADMIN_CONTEXT_ID) - LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 1 - - for service in response.services: - service_id = service.service_id - assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE - - response = context_client.ListConnections(service_id) - LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) - - if service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: - assert len(response.connections) == 2 - else: - str_service = grpc_message_to_json_string(service) - raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) diff --git a/src/tests/ofc25/tests/test_functional_create_vlinks.py b/src/tests/ofc25/tests/test_functional_create_vlinks.py new file mode 100644 index 0000000000000000000000000000000000000000..a4d38a510e1a2679a60beaffd5e24e8647135b90 --- /dev/null +++ b/src/tests/ofc25/tests/test_functional_create_vlinks.py @@ -0,0 +1,89 @@ +# Copyright 2022-2026 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
diff --git a/src/tests/ofc25/tests/test_functional_create_vlinks.py b/src/tests/ofc25/tests/test_functional_create_vlinks.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4d38a510e1a2679a60beaffd5e24e8647135b90
--- /dev/null
+++ b/src/tests/ofc25/tests/test_functional_create_vlinks.py
@@ -0,0 +1,89 @@
+# Copyright 2022-2026 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+from typing import Set
+
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results
+
+# pylint: disable=unused-import
+from .Fixtures import PROFILE_E2E, PROFILE_IP, PROFILE_OPT, tfs_clients
+from .Helper import (
+    DESCRIPTORS_DIR,
+    VIRTUAL_LINK_DESCRIPTORS,
+    build_expected_optical_connections,
+    wait_for_state_or_raise,
+)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def test_create_virtual_link(
+    tfs_clients,
+) -> None:
+    ip_context_client     = tfs_clients[PROFILE_IP].context
+    ip_device_client      = tfs_clients[PROFILE_IP].device
+    ip_vnt_manager_client = tfs_clients[PROFILE_IP].vnt_manager
+    e2e_context_client    = tfs_clients[PROFILE_E2E].context
+    opt_context_client    = tfs_clients[PROFILE_OPT].context
+
+    assert ip_vnt_manager_client is not None
+
+    # Initial state: no services in any TFS and no virtual links in IP.
+    wait_for_state_or_raise(
+        ip_context_client=ip_context_client,
+        e2e_context_client=e2e_context_client,
+        opt_context_client=opt_context_client,
+        expected_virtual_link_uuids=None,
+        expected_virtual_link_names=set(),
+        expected_e2e_services=0,
+        expected_opt_services=0,
+        expected_opt_connections={},
+    )
+
+    expected_virtual_link_names: Set[str] = set()
+    for index, (descriptor_name, virtual_link_name) in enumerate(VIRTUAL_LINK_DESCRIPTORS, start=1):
+        descriptor_file = os.path.join(DESCRIPTORS_DIR, descriptor_name)
+        LOGGER.info(
+            'Creating virtual link step %d/%d from descriptor %s',
+            index, len(VIRTUAL_LINK_DESCRIPTORS), descriptor_file
+        )
+        descriptor_loader = DescriptorLoader(
+            descriptors_file=descriptor_file,
+            context_client=ip_context_client,
+            device_client=ip_device_client,
+            vntm_client=ip_vnt_manager_client,
+        )
+        results = descriptor_loader.process()
+        check_descriptor_load_results(results, descriptor_loader)
+        LOGGER.info('Virtual link request submitted successfully for %s', virtual_link_name)
+
+        expected_virtual_link_names.add(virtual_link_name)
+        LOGGER.info(
+            'Waiting for propagated state after creating %s: expected_virtual_link_names=%s '
+            'expected_e2e_services=%d',
+            virtual_link_name, str(sorted(expected_virtual_link_names)), index
+        )
+        expected_opt_connections = build_expected_optical_connections(expected_virtual_link_names)
+        wait_for_state_or_raise(
+            ip_context_client=ip_context_client,
+            e2e_context_client=e2e_context_client,
+            opt_context_client=opt_context_client,
+            expected_virtual_link_uuids=None,
+            expected_virtual_link_names=expected_virtual_link_names,
+            expected_e2e_services=index,
+            expected_opt_services=index,
+            expected_opt_connections=expected_opt_connections,
+        )
diff --git a/src/tests/ofc25/tests/test_functional_delete_service.py b/src/tests/ofc25/tests/test_functional_delete_service.py
deleted file mode 100644
index 9c689d259226ae53c14ffcd90e14941d9a9cba5a..0000000000000000000000000000000000000000
--- a/src/tests/ofc25/tests/test_functional_delete_service.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Set, Tuple
-
-from common.Constants import DEFAULT_CONTEXT_NAME
-from common.proto.context_pb2 import (
-    ContextId, ServiceId, ServiceStatusEnum, ServiceTypeEnum,
-)
-from common.tools.grpc.Tools import grpc_message_to_json_string
-from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Service import json_service_id
-from context.client.ContextClient import ContextClient
-from service.client.ServiceClient import ServiceClient
-
-from .Fixtures import (
-    context_client, service_client,
-)  # pylint: disable=unused-import
-
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
-
-def test_service_removal_bidir(
-    context_client : ContextClient,  # pylint: disable=redefined-outer-name
-    service_client : ServiceClient,  # pylint: disable=redefined-outer-name
-):
-    # Verify the scenario has 1 service and 0 slices
-    response = context_client.GetContext(ADMIN_CONTEXT_ID)
-    assert len(response.service_ids) == 1
-    assert len(response.slice_ids) == 0
-
-    # Check there are no slices
-    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
-    LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
-    assert len(response.slices) == 0
-
-    # Check there is 1 service
-    response = context_client.ListServices(ADMIN_CONTEXT_ID)
-    LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 1
-
-    context_service_uuids : Set[Tuple[str, str]] = set()
-    for service in response.services:
-        service_id = service.service_id
-        assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
-
-        response = context_client.ListConnections(service_id)
-        LOGGER.warning('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
-
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
-            assert len(response.connections) == 2
-            context_uuid = service_id.context_id.context_uuid.uuid
-            service_uuid = service_id.service_uuid.uuid
-            context_service_uuids.add((context_uuid, service_uuid))
-        else:
-            str_service = grpc_message_to_json_string(service)
-            raise Exception('Unexpected ServiceType: {:s}'.format(str_service))
-
-    # Identify service to delete
-    assert len(context_service_uuids) == 1
-    context_uuid, service_uuid = set(context_service_uuids).pop()
-
-    # Delete Service
-    # service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))))
-
-    import delete_service
-
-    # Verify the scenario has no services/slices
-    response = context_client.GetContext(ADMIN_CONTEXT_ID)
-    assert len(response.service_ids) == 0
-    assert len(response.slice_ids) == 0
diff --git a/src/tests/ofc25/tests/test_functional_delete_vlinks.py b/src/tests/ofc25/tests/test_functional_delete_vlinks.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a280c758077a17968e38beee452ed4fc754851c
--- /dev/null
+++ b/src/tests/ofc25/tests/test_functional_delete_vlinks.py
@@ -0,0 +1,88 @@
+# Copyright 2022-2026 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+
+from common.tools.descriptor.Loader import DescriptorLoader
+
+# pylint: disable=unused-import
+from .Fixtures import PROFILE_E2E, PROFILE_IP, PROFILE_OPT, tfs_clients
+from .Helper import (
+    DESCRIPTORS_DIR,
+    VIRTUAL_LINK_DESCRIPTORS,
+    build_expected_optical_connections,
+    wait_for_state_or_raise,
+)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+def test_delete_virtual_links(
+    tfs_clients,
+) -> None:
+    ip_context_client     = tfs_clients[PROFILE_IP].context
+    ip_device_client      = tfs_clients[PROFILE_IP].device
+    ip_vnt_manager_client = tfs_clients[PROFILE_IP].vnt_manager
+    e2e_context_client    = tfs_clients[PROFILE_E2E].context
+    opt_context_client    = tfs_clients[PROFILE_OPT].context
+
+    assert ip_vnt_manager_client is not None
+
+    expected_virtual_link_names = {link_name for _, link_name in VIRTUAL_LINK_DESCRIPTORS}
+    wait_for_state_or_raise(
+        ip_context_client=ip_context_client,
+        e2e_context_client=e2e_context_client,
+        opt_context_client=opt_context_client,
+        expected_virtual_link_uuids=None,
+        expected_virtual_link_names=expected_virtual_link_names,
+        expected_e2e_services=len(VIRTUAL_LINK_DESCRIPTORS),
+        expected_opt_services=len(VIRTUAL_LINK_DESCRIPTORS),
+        expected_opt_connections=build_expected_optical_connections(expected_virtual_link_names),
+    )
+
+    for remaining, (descriptor_name, virtual_link_name) in zip(
+        range(len(VIRTUAL_LINK_DESCRIPTORS) - 1, -1, -1), reversed(VIRTUAL_LINK_DESCRIPTORS)
+    ):
+        descriptor_file = os.path.join(DESCRIPTORS_DIR, descriptor_name)
+        LOGGER.info(
+            'Deleting virtual link from descriptor %s; expecting %d remaining E2E services afterwards',
+            descriptor_file, remaining
+        )
+        descriptor_loader = DescriptorLoader(
+            descriptors_file=descriptor_file,
+            context_client=ip_context_client,
+            device_client=ip_device_client,
+            vntm_client=ip_vnt_manager_client,
+        )
+        descriptor_loader.unload()
+        LOGGER.info('Virtual link removal submitted successfully for %s', virtual_link_name)
+
+        expected_virtual_link_names.remove(virtual_link_name)
+        LOGGER.info(
+            'Waiting for propagated state after deleting %s: expected_virtual_link_names=%s '
+            'expected_e2e_services=%d',
+            virtual_link_name, str(sorted(expected_virtual_link_names)), remaining
+        )
+        wait_for_state_or_raise(
+            ip_context_client=ip_context_client,
+            e2e_context_client=e2e_context_client,
+            opt_context_client=opt_context_client,
+            expected_virtual_link_uuids=None,
+            expected_virtual_link_names=expected_virtual_link_names,
+            expected_e2e_services=remaining,
+            expected_opt_services=remaining,
+            expected_opt_connections=build_expected_optical_connections(expected_virtual_link_names),
+        )
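Both new tests lean on wait_for_state_or_raise from tests/Helper.py, whose implementation is not part of this patch. Its apparent semantics (poll the three controllers until every expectation holds, and fail only after a deadline) can be approximated as follows; names, timeout, and poll interval are illustrative, not the actual Helper code:

    import time

    def wait_until(check_fn, timeout_s: float = 120.0, poll_s: float = 2.0) -> None:
        # Re-evaluate check_fn until it passes or the deadline expires; transient
        # AssertionErrors are tolerated while the controllers converge.
        deadline = time.monotonic() + timeout_s
        last_error = None
        while time.monotonic() < deadline:
            try:
                check_fn()
                return
            except AssertionError as error:
                last_error = error
            time.sleep(poll_s)
        raise TimeoutError('State did not converge in time: {:s}'.format(str(last_error)))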
diff --git a/src/tests/ofc25/tfs-ingress-e2e.yaml b/src/tests/ofc25/tfs-ingress-e2e.yaml
index 27f140f3df549eee89f25d901e5ed824f688d754..df4b1e2f24a81249942b138d57dd53cf12dac738 100644
--- a/src/tests/ofc25/tfs-ingress-e2e.yaml
+++ b/src/tests/ofc25/tfs-ingress-e2e.yaml
@@ -15,10 +15,40 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: tfs-ingress-e2e
+  name: tfs-ingress-e2e-web
   annotations:
     nginx.ingress.kubernetes.io/rewrite-target: "/$2"
-
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
+spec:
+  ingressClassName: tfs-ingress-class-e2e
+  rules:
+    - http:
+        paths:
+          - path: /webui(/|$)(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: webuiservice
+                port:
+                  number: 8004
+          - path: /grafana(/|$)(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: webuiservice
+                port:
+                  number: 3000
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-e2e-nbi
+  annotations:
     # Enable websocket services and configure sticky cookies (seems not to work)
     #nginx.org/websocket-services: "nbiservice"
     #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
@@ -44,49 +74,63 @@ spec:
   rules:
     - http:
         paths:
-          - path: /webui(/|$)(.*)
+          - path: /.well-known
             pathType: Prefix
             backend:
               service:
-                name: webuiservice
+                name: nbiservice
                 port:
-                  number: 8004
-          - path: /grafana(/|$)(.*)
+                  number: 8080
+          - path: /restconf
             pathType: Prefix
             backend:
               service:
-                name: webuiservice
+                name: nbiservice
                 port:
-                  number: 3000
-          - path: /()(restconf/.*)
+                  number: 8080
+          - path: /socket.io
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /tfs-api
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /bmw
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(socket.io/.*)
+          - path: /qkd_app
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(tfs-api/.*)
+          - path: /camara
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(bmw/.*)
+          - path: /agent-probes
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(qkd_app/.*)
+          - path: /osm-api
             pathType: Prefix
             backend:
               service:
diff --git a/src/tests/ofc25/tfs-ingress-ip.yaml b/src/tests/ofc25/tfs-ingress-ip.yaml
index cde7accf12c7364361dbab671eceb6bbb6143acf..568c83e46cb28b30ff5578d7aca80921ac4cb5ea 100644
--- a/src/tests/ofc25/tfs-ingress-ip.yaml
+++ b/src/tests/ofc25/tfs-ingress-ip.yaml
@@ -15,10 +15,40 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: tfs-ingress-ip
+  name: tfs-ingress-ip-web
   annotations:
     nginx.ingress.kubernetes.io/rewrite-target: "/$2"
-
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
+spec:
+  ingressClassName: tfs-ingress-class-ip
+  rules:
+    - http:
+        paths:
+          - path: /webui(/|$)(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: webuiservice
+                port:
+                  number: 8004
+          - path: /grafana(/|$)(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: webuiservice
+                port:
+                  number: 3000
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-ip-nbi
+  annotations:
     # Enable websocket services and configure sticky cookies (seems not to work)
     #nginx.org/websocket-services: "nbiservice"
     #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
@@ -44,49 +74,63 @@ spec:
   rules:
     - http:
         paths:
-          - path: /webui(/|$)(.*)
+          - path: /.well-known
             pathType: Prefix
             backend:
               service:
-                name: webuiservice
+                name: nbiservice
                 port:
-                  number: 8004
-          - path: /grafana(/|$)(.*)
+                  number: 8080
+          - path: /restconf
             pathType: Prefix
             backend:
               service:
-                name: webuiservice
+                name: nbiservice
                 port:
-                  number: 3000
-          - path: /()(restconf/.*)
+                  number: 8080
+          - path: /socket.io
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /tfs-api
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /bmw
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(socket.io/.*)
+          - path: /qkd_app
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(tfs-api/.*)
+          - path: /camara
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(bmw/.*)
+          - path: /agent-probes
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(qkd_app/.*)
+          - path: /osm-api
             pathType: Prefix
             backend:
               service:
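In these manifests the rewrite-target regex now applies only to the -web ingresses, while the -nbi ingresses forward their prefixes to nbiservice unrewritten. Both behaviors can be spot-checked from a test client after applying the manifests; the base URL and endpoint paths below are illustrative and depend on the deployment:

    import requests

    BASE_URL = 'http://127.0.0.1:80'  # assumption: ingress controller reachable here

    # Matched by the regex path on the -web ingress: '/webui/' is rewritten,
    # so webuiservice sees '/'.
    print(requests.get(BASE_URL + '/webui/', timeout=5).status_code)

    # Plain Prefix path on the -nbi ingress: nbiservice receives the full
    # '/tfs-api/...' path untouched (endpoint path illustrative).
    print(requests.get(BASE_URL + '/tfs-api/', timeout=5).status_code)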
diff --git a/src/tests/ofc25/tfs-ingress-opt.yaml b/src/tests/ofc25/tfs-ingress-opt.yaml
index cf5fd09e9ba601eb1980fc5e99f98d4a326d544c..ef33548baf67d05514c535532ebb5b5a7e8723b3 100644
--- a/src/tests/ofc25/tfs-ingress-opt.yaml
+++ b/src/tests/ofc25/tfs-ingress-opt.yaml
@@ -15,10 +15,40 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: tfs-ingress-opt
+  name: tfs-ingress-opt-web
   annotations:
     nginx.ingress.kubernetes.io/rewrite-target: "/$2"
-
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
+spec:
+  ingressClassName: tfs-ingress-class-opt
+  rules:
+    - http:
+        paths:
+          - path: /webui(/|$)(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: webuiservice
+                port:
+                  number: 8004
+          - path: /grafana(/|$)(.*)
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: webuiservice
+                port:
+                  number: 3000
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-opt-nbi
+  annotations:
     # Enable websocket services and configure sticky cookies (seems not to work)
     #nginx.org/websocket-services: "nbiservice"
     #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
@@ -44,49 +74,63 @@ spec:
   rules:
     - http:
         paths:
-          - path: /webui(/|$)(.*)
+          - path: /.well-known
             pathType: Prefix
             backend:
               service:
-                name: webuiservice
+                name: nbiservice
                 port:
-                  number: 8004
-          - path: /grafana(/|$)(.*)
+                  number: 8080
+          - path: /restconf
             pathType: Prefix
             backend:
               service:
-                name: webuiservice
+                name: nbiservice
                 port:
-                  number: 3000
-          - path: /()(restconf/.*)
+                  number: 8080
+          - path: /socket.io
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /tfs-api
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /bmw
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(socket.io/.*)
+          - path: /qkd_app
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(tfs-api/.*)
+          - path: /camara
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(bmw/.*)
+          - path: /agent-probes
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(qkd_app/.*)
+          - path: /osm-api
             pathType: Prefix
             backend:
               service:
diff --git a/src/tests/ofc25/undeploy.sh b/src/tests/ofc25/undeploy_all_in_one.sh
similarity index 100%
rename from src/tests/ofc25/undeploy.sh
rename to src/tests/ofc25/undeploy_all_in_one.sh
diff --git a/src/tests/scenario2/tfs-ingress-dom1.yaml b/src/tests/scenario2/tfs-ingress-dom1.yaml
index c497d2e73d02ecaf99f783b27be386322fa3a45e..166d17297d5dad138eed2dbce38f83b59b47bbfb 100644
--- a/src/tests/scenario2/tfs-ingress-dom1.yaml
+++ b/src/tests/scenario2/tfs-ingress-dom1.yaml
@@ -15,43 +15,122 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: tfs-ingress-dom1
+  name: tfs-ingress-dom1-web
   annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
 spec:
   ingressClassName: tfs-ingress-class-dom1
   rules:
     - http:
         paths:
           - path: /webui(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
             backend:
               service:
                 name: webuiservice
                 port:
                   number: 8004
           - path: /grafana(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
             backend:
               service:
                 name: webuiservice
                 port:
                   number: 3000
-          - path: /()(restconf/.*)
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom1-nbi
+  annotations:
+    # Enable websocket services and configure sticky cookies (seems not to work)
+    #nginx.org/websocket-services: "nbiservice"
+    #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
+
+    # Enable sticky sessions (use same backend for all connections
+    # originated by a specific client, identified through its cookie)
+    nginx.ingress.kubernetes.io/affinity: "cookie"
+    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
+    nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session"
+    nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
+    nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true"
+
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
+spec:
+  ingressClassName: tfs-ingress-class-dom1
+  rules:
+    - http:
+        paths:
+          - path: /.well-known
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /restconf
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /socket.io
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /tfs-api
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /bmw
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /qkd_app
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /camara
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(tfs-api/.*)
+          - path: /agent-probes
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(bmw/.*)
+          - path: /osm-api
             pathType: Prefix
             backend:
               service:
diff --git a/src/tests/scenario2/tfs-ingress-dom2.yaml b/src/tests/scenario2/tfs-ingress-dom2.yaml
index 985a1be9e8699701819471cc8e5b175eb78afe66..7711c38ba63957112f83ae96813d446c28058414 100644
--- a/src/tests/scenario2/tfs-ingress-dom2.yaml
+++ b/src/tests/scenario2/tfs-ingress-dom2.yaml
@@ -15,43 +15,122 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: tfs-ingress-dom2
+  name: tfs-ingress-dom2-web
   annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
 spec:
   ingressClassName: tfs-ingress-class-dom2
   rules:
     - http:
         paths:
           - path: /webui(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
            backend:
               service:
                 name: webuiservice
                 port:
                   number: 8004
           - path: /grafana(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
             backend:
               service:
                 name: webuiservice
                 port:
                   number: 3000
-          - path: /()(restconf/.*)
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom2-nbi
+  annotations:
+    # Enable websocket services and configure sticky cookies (seems not to work)
+    #nginx.org/websocket-services: "nbiservice"
+    #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
+
+    # Enable sticky sessions (use same backend for all connections
+    # originated by a specific client, identified through its cookie)
+    nginx.ingress.kubernetes.io/affinity: "cookie"
+    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
+    nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session"
+    nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
+    nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true"
+
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
+spec:
+  ingressClassName: tfs-ingress-class-dom2
+  rules:
+    - http:
+        paths:
+          - path: /.well-known
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /restconf
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /socket.io
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /tfs-api
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /bmw
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /qkd_app
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /camara
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(tfs-api/.*)
+          - path: /agent-probes
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(bmw/.*)
+          - path: /osm-api
             pathType: Prefix
             backend:
               service:
diff --git a/src/tests/scenario2/tfs-ingress-dom3.yaml b/src/tests/scenario2/tfs-ingress-dom3.yaml
index e882f59fda98338b9292ab49953ea5b78937f202..b9517b0d882ed409542117ff5f8a974efe43bd5e 100644
--- a/src/tests/scenario2/tfs-ingress-dom3.yaml
+++ b/src/tests/scenario2/tfs-ingress-dom3.yaml
@@ -15,43 +15,122 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: tfs-ingress-dom3
+  name: tfs-ingress-dom3-web
   annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
 spec:
   ingressClassName: tfs-ingress-class-dom3
   rules:
     - http:
         paths:
           - path: /webui(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
             backend:
               service:
                 name: webuiservice
                 port:
                   number: 8004
           - path: /grafana(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
             backend:
               service:
                 name: webuiservice
                 port:
                   number: 3000
-          - path: /()(restconf/.*)
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom3-nbi
+  annotations:
+    # Enable websocket services and configure sticky cookies (seems not to work)
+    #nginx.org/websocket-services: "nbiservice"
+    #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
+
+    # Enable sticky sessions (use same backend for all connections
+    # originated by a specific client, identified through its cookie)
+    nginx.ingress.kubernetes.io/affinity: "cookie"
+    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
+    nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session"
+    nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
+    nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true"
+
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
+spec:
+  ingressClassName: tfs-ingress-class-dom3
+  rules:
+    - http:
+        paths:
+          - path: /.well-known
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /restconf
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /socket.io
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /tfs-api
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /bmw
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /qkd_app
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /camara
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(tfs-api/.*)
+          - path: /agent-probes
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(bmw/.*)
+          - path: /osm-api
             pathType: Prefix
             backend:
               service:
diff --git a/src/tests/scenario2/tfs-ingress-dom4.yaml b/src/tests/scenario2/tfs-ingress-dom4.yaml
index 1f7b98f9a5c761da7e38847688a0153e48efe5c4..883e93b76e0c5b3951f24498c3d9f4843b6ca2a0 100644
--- a/src/tests/scenario2/tfs-ingress-dom4.yaml
+++ b/src/tests/scenario2/tfs-ingress-dom4.yaml
@@ -15,43 +15,122 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: tfs-ingress-dom4
+  name: tfs-ingress-dom4-web
   annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
 spec:
   ingressClassName: tfs-ingress-class-dom4
   rules:
     - http:
        paths:
           - path: /webui(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
             backend:
               service:
                 name: webuiservice
                 port:
                   number: 8004
           - path: /grafana(/|$)(.*)
-            pathType: Prefix
+            pathType: ImplementationSpecific
             backend:
               service:
                 name: webuiservice
                 port:
                   number: 3000
-          - path: /()(restconf/.*)
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: tfs-ingress-dom4-nbi
+  annotations:
+    # Enable websocket services and configure sticky cookies (seems not to work)
+    #nginx.org/websocket-services: "nbiservice"
+    #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
+
+    # Enable sticky sessions (use same backend for all connections
+    # originated by a specific client, identified through its cookie)
+    nginx.ingress.kubernetes.io/affinity: "cookie"
+    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
+    nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session"
+    nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
+    nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true"
+
+    nginx.ingress.kubernetes.io/limit-rps: "50"             # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"     # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"  # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # max timeout between two successive read operations
+spec:
+  ingressClassName: tfs-ingress-class-dom4
+  rules:
+    - http:
+        paths:
+          - path: /.well-known
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /restconf
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /socket.io
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /tfs-api
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /bmw
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /qkd_app
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
+          - path: /camara
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(tfs-api/.*)
+          - path: /agent-probes
             pathType: Prefix
             backend:
               service:
                 name: nbiservice
                 port:
                   number: 8080
-          - path: /()(bmw/.*)
+          - path: /osm-api
             pathType: Prefix
             backend:
               service:
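The final change reworks the VNT Manager's Kafka request/reply path. Previously, wait_for_reply created its consumer (with auto.offset.reset set to latest) only after the recommendation had been produced, so a reply published before partition assignment completed could be lost. The patch below creates the consumer, and blocks until assignment, before producing. For reference, a rough sketch of the pattern; the broker address and topic names are illustrative, not the module's constants:

    import json, time, uuid
    from confluent_kafka import Consumer, Producer

    consumer = Consumer({
        'bootstrap.servers': 'localhost:9094',   # illustrative broker address
        'group.id'         : str(uuid.uuid4()),
        'auto.offset.reset': 'latest',
    })
    consumer.subscribe(['vntmanager_response'])  # illustrative topic name
    while len(consumer.assignment()) == 0:       # block until partitions are assigned
        consumer.poll(0.2)

    producer = Producer({'bootstrap.servers': 'localhost:9094'})
    request_key = str(uuid.uuid4())
    producer.produce('vntmanager_request', key=request_key, value=json.dumps({'event': 'vlink_create'}))
    producer.flush()

    deadline = time.monotonic() + 120.0
    while time.monotonic() < deadline:
        message = consumer.poll(2.0)
        if message is None or message.error(): continue
        if message.key() is not None and message.key().decode('utf-8') == request_key:
            print(json.loads(message.value()))   # the correlated reply
            break
    consumer.close()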
diff --git a/src/vnt_manager/service/VNTManagerServiceServicerImpl.py b/src/vnt_manager/service/VNTManagerServiceServicerImpl.py
index da77c33dce8d270596b27d2adcf56207ffe6ccf9..5372ac68e8ae33312a8fb6a3677e3d5c2aa914f2 100644
--- a/src/vnt_manager/service/VNTManagerServiceServicerImpl.py
+++ b/src/vnt_manager/service/VNTManagerServiceServicerImpl.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from typing import Dict, Optional
-import grpc, json, logging, uuid
+import grpc, json, logging, time, uuid
 from confluent_kafka import Consumer as KafkaConsumer
 from confluent_kafka import Producer as KafkaProducer
 from confluent_kafka import KafkaError
@@ -68,6 +68,30 @@ class VNTManagerServiceServicerImpl(VNTManagerServiceServicer):
         self.kafka_producer.flush()
         return request_key
 
+    def create_reply_consumer(self) -> KafkaConsumer:
+        LOGGER.info('[create_reply_consumer] begin')
+        kafka_consumer = KafkaConsumer({
+            'bootstrap.servers'   : KafkaConfig.get_kafka_address(),
+            'group.id'            : str(uuid.uuid4()),
+            'auto.offset.reset'   : 'latest',
+            'enable.auto.commit'  : False,
+            'max.poll.interval.ms': 600000,
+            'session.timeout.ms'  : 60000,
+        })
+        kafka_consumer.subscribe([KafkaTopic.VNTMANAGER_RESPONSE.value])
+
+        deadline = time.monotonic() + 15.0
+        while time.monotonic() < deadline:
+            kafka_consumer.poll(0.2)
+            assignment = kafka_consumer.assignment()
+            if len(assignment) > 0:
+                LOGGER.info('[create_reply_consumer] assigned=%s', str(assignment))
+                return kafka_consumer
+
+        LOGGER.error('[create_reply_consumer] timed out waiting for topic assignment')
+        kafka_consumer.close()
+        raise Exception('Kafka consumer subscription to VNT Manager reply topic was not assigned')
+
     def send_vlink_create(self, request : Link) -> str:
         return self.send_recommendation({
             'event': 'vlink_create', 'data': grpc_message_to_json_string(request)
@@ -78,21 +102,24 @@
             'event': 'vlink_remove', 'data': grpc_message_to_json_string(request)
         })
 
-    def wait_for_reply(self, request_key : str) -> Optional[Dict]:
+    def wait_for_reply(self, request_key : str, kafka_consumer : KafkaConsumer) -> Optional[Dict]:
         LOGGER.info('[wait_for_reply] request_key={:s}'.format(str(request_key)))
-
-        self.kafka_consumer = KafkaConsumer({
-            'bootstrap.servers'   : KafkaConfig.get_kafka_address(),
-            'group.id'            : str(uuid.uuid4()),
-            'auto.offset.reset'   : 'latest',
-            'max.poll.interval.ms': 600000,
-            'session.timeout.ms'  : 60000,
-        })
-        self.kafka_consumer.subscribe([KafkaTopic.VNTMANAGER_RESPONSE.value])
-
+        deadline = time.monotonic() + 120.0
+        polls_without_message = 0
         while True:
-            receive_msg = self.kafka_consumer.poll(2.0)
-            if receive_msg is None: continue
+            receive_msg = kafka_consumer.poll(2.0)
+            if receive_msg is None:
+                polls_without_message += 1
+                if polls_without_message % 5 == 0:
+                    LOGGER.info(
+                        '[wait_for_reply] request_key=%s still waiting... assignment=%s',
+                        str(request_key), str(kafka_consumer.assignment())
+                    )
+                if time.monotonic() >= deadline:
+                    raise TimeoutError('Timed out waiting for VNT Manager reply for request_key={:s}'.format(
+                        str(request_key)
+                    ))
+                continue
             LOGGER.info('[wait_for_reply] receive_msg={:s}'.format(str(receive_msg)))
             if receive_msg.error():
                 if receive_msg.error().code() == KafkaError._PARTITION_EOF: continue
@@ -127,8 +154,13 @@
     def SetVirtualLink(self, request : Link, context : grpc.ServicerContext) -> LinkId:
         try:
             LOGGER.info('[SetVirtualLink] request={:s}'.format(grpc_message_to_json_string(request)))
+            kafka_consumer = self.create_reply_consumer()
             request_key = self.send_vlink_create(request)
-            reply = self.wait_for_reply(request_key)
+            try:
+                reply = self.wait_for_reply(request_key, kafka_consumer)
+            finally:
+                LOGGER.info('[SetVirtualLink] closing reply consumer')
+                kafka_consumer.close()
             LOGGER.info('[SetVirtualLink] reply={:s}'.format(str(reply)))
 
             # At this point, we know the request is processed and an optical connection was created
@@ -193,8 +225,13 @@
     def RemoveVirtualLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty:
         try:
             LOGGER.info('[RemoveVirtualLink] request={:s}'.format(grpc_message_to_json_string(request)))
+            kafka_consumer = self.create_reply_consumer()
             request_key = self.send_vlink_remove(request)
-            reply = self.wait_for_reply(request_key)
+            try:
+                reply = self.wait_for_reply(request_key, kafka_consumer)
+            finally:
+                LOGGER.info('[RemoveVirtualLink] closing reply consumer')
+                kafka_consumer.close()
             LOGGER.info('[RemoveVirtualLink] reply={:s}'.format(str(reply)))
 
             # At this point, we know the request is processed and an optical connection was removed
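A possible follow-up: each RPC now creates and closes its own reply consumer, so the subscribe/close lifecycle could be factored into a context manager instead of being repeated in SetVirtualLink and RemoveVirtualLink. Illustrative only, not part of this patch:

    from contextlib import contextmanager

    @contextmanager
    def reply_consumer(servicer):
        # create_reply_consumer() subscribes and blocks until partitions are assigned
        consumer = servicer.create_reply_consumer()
        try:
            yield consumer
        finally:
            consumer.close()

    # Usage inside SetVirtualLink / RemoveVirtualLink:
    #   with reply_consumer(self) as consumer:
    #       request_key = self.send_vlink_create(request)
    #       reply = self.wait_for_reply(request_key, consumer)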