diff --git a/common_requirements.in b/common_requirements.in index 39982ebe75dedf8bfaceebe1bfcf986a815ea7ff..37b70c993e913602f9d5e509d0c887802c5d0b1e 100644 --- a/common_requirements.in +++ b/common_requirements.in @@ -16,7 +16,7 @@ coverage==6.3 grpcio==1.47.* grpcio-health-checking==1.47.* grpcio-tools==1.47.* -grpclib[protobuf] +grpclib==0.4.4 prettytable==3.5.0 prometheus-client==0.13.0 protobuf==3.20.* diff --git a/deploy/tfs.sh b/deploy/tfs.sh index a67cbeafc9edb6edf5aa6f5bfe1bc027d2099028..019fcfa9ea8095207fd26f6d96e4e33626534357 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -177,7 +177,7 @@ echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT echo "Create Redis secret..." # first try to delete an old one if exists -kubectl delete secret redis-secrets --namespace=$TFS_K8S_NAMESPACE +kubectl delete secret redis-secrets --namespace=$TFS_K8S_NAMESPACE --ignore-not-found REDIS_PASSWORD=`uuidgen` kubectl create secret generic redis-secrets --namespace=$TFS_K8S_NAMESPACE \ --from-literal=REDIS_PASSWORD=$REDIS_PASSWORD @@ -432,6 +432,31 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then }' ${GRAFANA_URL_UPDATED}/api/datasources echo + curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{ + "access" : "proxy", + "type" : "postgres", + "name" : "cockroachdb", + "url" : "'cockroachdb-public.${CRDB_NAMESPACE}.svc.cluster.local:${CRDB_SQL_PORT}'", + "database" : "'${CRDB_DATABASE}'", + "user" : "'${CRDB_USERNAME}'", + "basicAuth": false, + "isDefault": false, + "jsonData" : { + "sslmode" : "require", + "postgresVersion" : 1100, + "maxOpenConns" : 0, + "maxIdleConns" : 2, + "connMaxLifetime" : 14400, + "tlsAuth" : false, + "tlsAuthWithCACert" : false, + "timescaledb" : false, + "tlsConfigurationMethod": "file-path", + "tlsSkipVerify" : true + }, + "secureJsonData": {"password": "'${CRDB_PASSWORD}'"} + }' ${GRAFANA_URL_UPDATED}/api/datasources + echo + # adding the datasource of the metrics collection framework curl -X POST -H 
"Content-Type: application/json" -H "Accept: application/json" -d '{ "access" : "proxy", diff --git a/proto/context.proto b/proto/context.proto index 3b25e6361766ee4c2b52e15aab215409f40cbb56..3104f1b545c02bab71c8638ebba03efdcbfe71ff 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -175,6 +175,7 @@ message Device { repeated DeviceDriverEnum device_drivers = 6; repeated EndPoint device_endpoints = 7; repeated Component component = 8; // Used for inventory + DeviceId controller_id = 9; // Identifier of node controlling the actual device } message Component { @@ -276,9 +277,10 @@ enum ServiceTypeEnum { enum ServiceStatusEnum { SERVICESTATUS_UNDEFINED = 0; SERVICESTATUS_PLANNED = 1; - SERVICESTATUS_ACTIVE = 2; - SERVICESTATUS_PENDING_REMOVAL = 3; - SERVICESTATUS_SLA_VIOLATED = 4; + SERVICESTATUS_ACTIVE = 2; + SERVICESTATUS_UPDATING = 3; + SERVICESTATUS_PENDING_REMOVAL = 4; + SERVICESTATUS_SLA_VIOLATED = 5; } message ServiceStatus { @@ -554,6 +556,13 @@ message Constraint_SLA_Isolation_level { repeated IsolationLevelEnum isolation_level = 1; } +message Constraint_Exclusions { + bool is_permanent = 1; + repeated DeviceId device_ids = 2; + repeated EndPointId endpoint_ids = 3; + repeated LinkId link_ids = 4; +} + message Constraint { oneof constraint { Constraint_Custom custom = 1; @@ -564,6 +573,7 @@ message Constraint { Constraint_SLA_Latency sla_latency = 6; Constraint_SLA_Availability sla_availability = 7; Constraint_SLA_Isolation_level sla_isolation = 8; + Constraint_Exclusions exclusions = 9; } } diff --git a/proto/service.proto b/proto/service.proto index 21e5699413cc4842962af6ee9c204b383fc61ec0..658859e3c5aac58e792d508a89b467e937198c5b 100644 --- a/proto/service.proto +++ b/proto/service.proto @@ -18,7 +18,8 @@ package service; import "context.proto"; service ServiceService { - rpc CreateService(context.Service ) returns (context.ServiceId) {} - rpc UpdateService(context.Service ) returns (context.ServiceId) {} - rpc DeleteService(context.ServiceId) 
returns (context.Empty ) {} + rpc CreateService (context.Service ) returns (context.ServiceId) {} + rpc UpdateService (context.Service ) returns (context.ServiceId) {} + rpc DeleteService (context.ServiceId) returns (context.Empty ) {} + rpc RecomputeConnections(context.Service ) returns (context.Empty ) {} } diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py index ba82e535ec958104bd14abf625eb6cd38c2a08ee..d5476a9534ca6e2d74ba16d3af71ed367bc5ab51 100644 --- a/src/common/type_checkers/Assertions.py +++ b/src/common/type_checkers/Assertions.py @@ -69,7 +69,9 @@ def validate_service_state_enum(message): 'SERVICESTATUS_UNDEFINED', 'SERVICESTATUS_PLANNED', 'SERVICESTATUS_ACTIVE', + 'SERVICESTATUS_UPDATING', 'SERVICESTATUS_PENDING_REMOVAL', + 'SERVICESTATUS_SLA_VIOLATED', ] diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py index 19a04c4c8ef4f77b3b7fb6949a1b567ef5cbec00..6c744d0dcef67fef1d8ac719eaba9420b530fe58 100644 --- a/src/compute/service/__main__.py +++ b/src/compute/service/__main__.py @@ -65,7 +65,7 @@ def main(): rest_server.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 92d4c88275e4751602bf735f734df84469c43c17..5ac91f233504a25bb787df5be9b1a36848f4e5c4 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -44,9 +44,17 @@ def main(): start_http_server(metrics_port) # Get Database Engine instance and initialize database, if needed + LOGGER.info('Getting SQLAlchemy DB Engine...') db_engine = Engine.get_engine() - if db_engine is None: return -1 - Engine.create_database(db_engine) + if db_engine is None: + LOGGER.error('Unable to get SQLAlchemy DB Engine...') + return -1 + + try: + Engine.create_database(db_engine) + except: # pylint: 
disable=bare-except # pragma: no cover + LOGGER.exception('Failed to check/create the database: {:s}'.format(str(db_engine.url))) + rebuild_database(db_engine) # Get message broker instance @@ -57,7 +65,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py index 592d7f4c545a222092ca95924afafa69d2798d7c..b33316539e7ab728194bda52e80cbc4896981ca2 100644 --- a/src/context/service/database/Constraint.py +++ b/src/context/service/database/Constraint.py @@ -66,7 +66,7 @@ def compose_constraints_data( constraint_name = '{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid) elif kind in { ConstraintKindEnum.SCHEDULE, ConstraintKindEnum.SLA_CAPACITY, ConstraintKindEnum.SLA_LATENCY, - ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION + ConstraintKindEnum.SLA_AVAILABILITY, ConstraintKindEnum.SLA_ISOLATION, ConstraintKindEnum.EXCLUSIONS }: constraint_name = '{:s}:{:s}:'.format(parent_kind, kind.value) else: diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index 3e106bc158ab804c7eada7284e9d1b883eb66264..7fc202b9077f2e1212d0c81313fcfbd1c05efb43 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -74,6 +74,11 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]: device_name = raw_device_uuid if len(raw_device_name) == 0 else raw_device_name device_uuid = device_get_uuid(request.device_id, device_name=device_name, allow_random=True) + if len(request.controller_id.device_uuid.uuid) > 0: + controller_uuid = device_get_uuid(request.controller_id, allow_random=False) + else: + controller_uuid = None + device_type = request.device_type oper_status = 
grpc_to_enum__device_operational_status(request.device_operational_status) device_drivers = [grpc_to_enum__device_driver(d) for d in request.device_drivers] @@ -139,6 +144,9 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]: 'updated_at' : now, }] + if controller_uuid is not None: + device_data[0]['controller_uuid'] = controller_uuid + def callback(session : Session) -> bool: stmt = insert(DeviceModel).values(device_data) stmt = stmt.on_conflict_do_update( diff --git a/src/context/service/database/Engine.py b/src/context/service/database/Engine.py index 5cfe7cd4be6fefb4fe2e0921e2cd0e2b4e23c60a..5924329900dda78d7a15ce7eebc6cbc69e954f8f 100644 --- a/src/context/service/database/Engine.py +++ b/src/context/service/database/Engine.py @@ -42,12 +42,6 @@ class Engine: LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) return None - try: - Engine.create_database(engine) - except: # pylint: disable=bare-except # pragma: no cover - LOGGER.exception('Failed to check/create to database: {:s}'.format(str(crdb_uri))) - return None - return engine @staticmethod diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py index cbbe0b5d7280a6f14d645b66abd4df444abb41aa..d093e782adde23092d9c9d3949f9153c8ee9d4f3 100644 --- a/src/context/service/database/models/ConstraintModel.py +++ b/src/context/service/database/models/ConstraintModel.py @@ -30,6 +30,7 @@ class ConstraintKindEnum(enum.Enum): SLA_LATENCY = 'sla_latency' SLA_AVAILABILITY = 'sla_availability' SLA_ISOLATION = 'sla_isolation' + EXCLUSIONS = 'exclusions' class ServiceConstraintModel(_Base): __tablename__ = 'service_constraint' diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py index beb500d601aa725c5c0d3c01633aebf31aa23e5b..1097d0b9ab47a86c47ce2ad8394d067ae9f9953e 100644 --- a/src/context/service/database/models/DeviceModel.py +++ 
b/src/context/service/database/models/DeviceModel.py @@ -13,7 +13,7 @@ # limitations under the License. import operator -from sqlalchemy import Column, DateTime, Enum, String +from sqlalchemy import Column, DateTime, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship from typing import Dict, List @@ -29,16 +29,22 @@ class DeviceModel(_Base): device_type = Column(String, nullable=False) device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum), nullable=False) device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) + controller_uuid = Column(UUID(as_uuid=False), ForeignKey('device.device_uuid'), nullable=True) created_at = Column(DateTime, nullable=False) updated_at = Column(DateTime, nullable=False) #topology_devices = relationship('TopologyDeviceModel', back_populates='device') config_rules = relationship('DeviceConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device' endpoints = relationship('EndPointModel', passive_deletes=True) # lazy='joined', back_populates='device' + controller = relationship('DeviceModel', remote_side=[device_uuid], passive_deletes=True) # lazy='joined', back_populates='device' def dump_id(self) -> Dict: return {'device_uuid': {'uuid': self.device_uuid}} + def dump_controller(self) -> Dict: + if self.controller is None: return {} + return self.controller.dump_id() + def dump_endpoints(self) -> List[Dict]: return [endpoint.dump() for endpoint in self.endpoints] @@ -60,6 +66,7 @@ class DeviceModel(_Base): 'device_type' : self.device_type, 'device_operational_status': self.device_operational_status.value, 'device_drivers' : [driver.value for driver in self.device_drivers], + 'controller_id' : self.dump_controller(), } if include_endpoints: result['device_endpoints'] = self.dump_endpoints() if include_config_rules: result['device_config'] = self.dump_config_rules() diff --git 
a/src/context/service/database/models/enums/ServiceStatus.py b/src/context/service/database/models/enums/ServiceStatus.py index 00ae71f7460fd76a3b8e0f3a981d2e2d08f89e7b..cd2a183b825eff54a51a844ea6834263bbabbc31 100644 --- a/src/context/service/database/models/enums/ServiceStatus.py +++ b/src/context/service/database/models/enums/ServiceStatus.py @@ -20,7 +20,9 @@ class ORM_ServiceStatusEnum(enum.Enum): UNDEFINED = ServiceStatusEnum.SERVICESTATUS_UNDEFINED PLANNED = ServiceStatusEnum.SERVICESTATUS_PLANNED ACTIVE = ServiceStatusEnum.SERVICESTATUS_ACTIVE + UPDATING = ServiceStatusEnum.SERVICESTATUS_UPDATING PENDING_REMOVAL = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL + SLA_VIOLATED = ServiceStatusEnum.SERVICESTATUS_SLA_VIOLATED grpc_to_enum__service_status = functools.partial( grpc_to_enum, ServiceStatusEnum, ORM_ServiceStatusEnum) diff --git a/src/dbscanserving/Dockerfile b/src/dbscanserving/Dockerfile index 5bbf0d215d7eb87f391f8115efe0c92e519dbf46..81e3fb28a059faf92f793aa5bd76d1a744e65d9b 100644 --- a/src/dbscanserving/Dockerfile +++ b/src/dbscanserving/Dockerfile @@ -31,9 +31,8 @@ RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ RUN groupadd -r teraflow && useradd -u 1001 --no-log-init -r -m -g teraflow teraflow USER teraflow -RUN mkdir -p /home/teraflow/controller/common - # set working directory +RUN mkdir -p /home/teraflow/controller/common WORKDIR /home/teraflow/controller # Get Python packages per module @@ -58,7 +57,9 @@ COPY --chown=teraflow:teraflow src/common/. ./ RUN rm -rf proto # Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /home/teraflow/controller/common/proto WORKDIR /home/teraflow/controller/common/proto +RUN touch __init__.py COPY --chown=teraflow:teraflow proto/*.proto ./ RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. 
*.proto RUN rm *.proto @@ -77,5 +78,5 @@ RUN python3 -m pip install -r dbscanserving/requirements.txt # Add component files into working directory COPY --chown=teraflow:teraflow ./src/dbscanserving/. dbscanserving -# Start dbscanserving service +# Start the service ENTRYPOINT ["python", "-m", "dbscanserving.service"] diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 38a6b735b32ee667c3be2f5381df84c40d773c06..d29d469cb0812218030698284abbfc7551058411 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -import grpc, logging, time +import grpc, logging, os, time from typing import Dict from prometheus_client import Histogram +from common.Constants import ServiceNameEnum +from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, get_env_var_name from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, safe_and_metered_rpc_method from common.method_wrappers.ServiceExceptions import NotFoundException, OperationFailedException from common.proto.context_pb2 import ( @@ -121,7 +123,15 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): t9 = time.time() - device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + automation_service_host = get_env_var_name(ServiceNameEnum.AUTOMATION, ENVVAR_SUFIX_SERVICE_HOST) + environment_variables = set(os.environ.keys()) + if automation_service_host in environment_variables: + # Automation component is deployed; leave devices disabled. Automation will enable them. + device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + else: + # Automation is not deployed; assume the device is ready while onboarding and set them as enabled. 
+ device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + device_id = context_client.SetDevice(device) t10 = time.time() diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index cd3af07e3324e50ff43eb5e653c4c46771a5507e..6a62a75e71f0e02adb7fb1b70e4568b382494980 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -79,11 +79,13 @@ def check_no_endpoints(device_endpoints) -> None: 'interrogation of the physical device.') def get_device_controller_uuid(device : Device) -> Optional[str]: - for config_rule in device.device_config.config_rules: - if config_rule.WhichOneof('config_rule') != 'custom': continue - if config_rule.custom.resource_key != '_controller': continue - device_controller_id = json.loads(config_rule.custom.resource_value) - return device_controller_id['uuid'] + controller_uuid = device.controller_id.device_uuid.uuid + if len(controller_uuid) > 0: return controller_uuid + #for config_rule in device.device_config.config_rules: + # if config_rule.WhichOneof('config_rule') != 'custom': continue + # if config_rule.custom.resource_key != '_controller': continue + # device_controller_id = json.loads(config_rule.custom.resource_value) + # return device_controller_id['uuid'] return None def populate_endpoints( @@ -142,11 +144,12 @@ def populate_endpoints( # Sub-devices should not have a driver assigned. Instead, they should have # a config rule specifying their controller. 
#_sub_device.device_drivers.extend(resource_value['drivers']) # pylint: disable=no-member - controller_config_rule = _sub_device.device_config.config_rules.add() - controller_config_rule.action = ConfigActionEnum.CONFIGACTION_SET - controller_config_rule.custom.resource_key = '_controller' - controller = {'uuid': device_uuid, 'name': device_name} - controller_config_rule.custom.resource_value = json.dumps(controller, indent=0, sort_keys=True) + #controller_config_rule = _sub_device.device_config.config_rules.add() + #controller_config_rule.action = ConfigActionEnum.CONFIGACTION_SET + #controller_config_rule.custom.resource_key = '_controller' + #controller = {'uuid': device_uuid, 'name': device_name} + #controller_config_rule.custom.resource_value = json.dumps(controller, indent=0, sort_keys=True) + _sub_device.controller_id.device_uuid.uuid = device_uuid new_sub_devices[_sub_device_uuid] = _sub_device diff --git a/src/device/service/__main__.py b/src/device/service/__main__.py index 35b548fe9d9422b68138f956ce159ee679d54f1c..a07a2ab90d15d99bdabe6b3fb6b0e0c9c497cf3c 100644 --- a/src/device/service/__main__.py +++ b/src/device/service/__main__.py @@ -66,7 +66,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py index c9812f90a76ebb06e35bde23033758e5740b877a..9d73ca9fcbfb8d7cda2308b2f58a3f84f72f072e 100644 --- a/src/dlt/connector/service/__main__.py +++ b/src/dlt/connector/service/__main__.py @@ -58,7 +58,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() diff --git a/src/dlt/mock_blockchain/service/__main__.py 
b/src/dlt/mock_blockchain/service/__main__.py index e4cffac51064b68c2acc494410e51785c45cd437..65a80ed51c25afcf825fff427a41c7d484e25595 100644 --- a/src/dlt/mock_blockchain/service/__main__.py +++ b/src/dlt/mock_blockchain/service/__main__.py @@ -49,7 +49,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py index f4bdbb7b80ca3f355a92268e74d28e02f7883302..73fa935399e7161aaf2ade06d51371c879607c3b 100644 --- a/src/interdomain/service/__main__.py +++ b/src/interdomain/service/__main__.py @@ -72,7 +72,7 @@ def main(): #remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc) # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') topology_abstractor.stop() diff --git a/src/l3_attackmitigator/service/__main__.py b/src/l3_attackmitigator/service/__main__.py index 1e91d5e9729f027b096afa97d1808795193dc2fa..aadccbdab47a2bc6fce860ffe762018bec78be31 100644 --- a/src/l3_attackmitigator/service/__main__.py +++ b/src/l3_attackmitigator/service/__main__.py @@ -52,7 +52,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass logger.info('Terminating...') grpc_service.stop() diff --git a/src/l3_centralizedattackdetector/service/__main__.py b/src/l3_centralizedattackdetector/service/__main__.py index 3408127242a01c110263d14947d63e64cfaa79a2..5b8873afd6d67e586ce727eaafdf51f1c0002814 100644 --- a/src/l3_centralizedattackdetector/service/__main__.py +++ b/src/l3_centralizedattackdetector/service/__main__.py @@ -53,7 +53,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or 
termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass logger.info('Terminating...') grpc_service.stop() diff --git a/src/load_generator/service/__main__.py b/src/load_generator/service/__main__.py index 7051a9a18bb2a86e2ca298b9ddfdc32f3e3fa6e7..5f5fa97f971223478abba2bfef1a1d6012fb6135 100644 --- a/src/load_generator/service/__main__.py +++ b/src/load_generator/service/__main__.py @@ -53,7 +53,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass scheduler = grpc_service.load_generator_servicer._scheduler if scheduler is not None: scheduler.stop() diff --git a/src/monitoring/service/__main__.py b/src/monitoring/service/__main__.py index 14f5609602c90eb9f54462e423af100997cf00d2..d0a132c70bed2c56bc9159ec3ad284120c0eb623 100644 --- a/src/monitoring/service/__main__.py +++ b/src/monitoring/service/__main__.py @@ -91,7 +91,7 @@ def main(): start_monitoring(name_mapping) # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() diff --git a/src/opticalattackdetector/Dockerfile b/src/opticalattackdetector/Dockerfile index 6607a22b72a311bad90197d430980d3977e8531f..fd903a616395617fbbe312b5fca8303966fc6053 100644 --- a/src/opticalattackdetector/Dockerfile +++ b/src/opticalattackdetector/Dockerfile @@ -55,8 +55,10 @@ RUN python3 -m pip install -r common_requirements.txt # Add common files into working directory WORKDIR /home/teraflow/controller/common COPY --chown=teraflow:teraflow src/common/. 
./ +RUN rm -rf proto # Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /home/teraflow/controller/common/proto WORKDIR /home/teraflow/controller/common/proto RUN touch __init__.py COPY --chown=teraflow:teraflow proto/*.proto ./ @@ -69,18 +71,18 @@ RUN mkdir -p /home/teraflow/controller/opticalattackdetector WORKDIR /home/teraflow/controller # Get Python packages per module -COPY --chown=teraflow:teraflow src/opticalattackdetector/requirements.in opticalattackdetector/requirements.in +COPY --chown=teraflow:teraflow ./src/opticalattackdetector/requirements.in opticalattackdetector/requirements.in # consider common and specific requirements to avoid inconsistencies with dependencies RUN pip-compile --quiet --output-file=opticalattackdetector/requirements.txt opticalattackdetector/requirements.in common_requirements.in RUN python3 -m pip install -r opticalattackdetector/requirements.txt -# Add files into working directory -COPY --chown=teraflow:teraflow src/context/. context -COPY --chown=teraflow:teraflow src/service/. service -COPY --chown=teraflow:teraflow src/monitoring/. monitoring -COPY --chown=teraflow:teraflow src/dbscanserving/. dbscanserving -COPY --chown=teraflow:teraflow src/opticalattackmitigator/. opticalattackmitigator -COPY --chown=teraflow:teraflow src/opticalattackdetector/. opticalattackdetector +# Add component files into working directory +COPY --chown=teraflow:teraflow ./src/context/. context +COPY --chown=teraflow:teraflow ./src/service/. service +COPY --chown=teraflow:teraflow ./src/monitoring/. monitoring +COPY --chown=teraflow:teraflow ./src/dbscanserving/. dbscanserving +COPY --chown=teraflow:teraflow ./src/opticalattackmitigator/. opticalattackmitigator +COPY --chown=teraflow:teraflow ./src/opticalattackdetector/. 
opticalattackdetector -# Start opticalattackdetector service +# Start the service ENTRYPOINT ["python", "-m", "opticalattackdetector.service"] diff --git a/src/opticalattackmanager/Dockerfile b/src/opticalattackmanager/Dockerfile index d97a5c6f38d42205a97404a4685c4a1cca157f8d..9920d6cefbb4ffc87558ae562cfb4fcee365930f 100644 --- a/src/opticalattackmanager/Dockerfile +++ b/src/opticalattackmanager/Dockerfile @@ -16,11 +16,9 @@ FROM python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ - apt-get --yes --quiet --quiet install wget g++ nano && \ + apt-get --yes --quiet --quiet install wget g++ && \ rm -rf /var/lib/apt/lists/* -# TODO: remove nano from installation - # Set Python to show logs as they occur ENV PYTHONUNBUFFERED=0 ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python @@ -53,7 +51,7 @@ RUN python3 -m pip install --upgrade pip-tools COPY --chown=teraflow:teraflow common_requirements.in common_requirements.in COPY --chown=teraflow:teraflow src/opticalattackmanager/requirements.in opticalattackmanager/requirements.in RUN sed -i '/protobuf/d' common_requirements.in && sed -i '/grpc/d' common_requirements.in -RUN pip-compile --output-file=common_requirements.txt common_requirements.in opticalattackmanager/requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in opticalattackmanager/requirements.in RUN python3 -m pip install -r common_requirements.txt # Get Python packages per module @@ -64,11 +62,12 @@ RUN python3 -m pip install -r common_requirements.txt # Add common files into working directory WORKDIR /home/teraflow/controller/common COPY --chown=teraflow:teraflow src/common/. 
./ +RUN rm -rf proto # Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /home/teraflow/controller/common/proto/asyncio WORKDIR /home/teraflow/controller/common/proto RUN touch __init__.py -RUN mkdir -p /home/teraflow/controller/common/proto/asyncio RUN touch asyncio/__init__.py COPY --chown=teraflow:teraflow proto/*.proto ./ RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto @@ -81,13 +80,13 @@ RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; RUN mkdir -p /home/teraflow/controller/opticalattackmanager WORKDIR /home/teraflow/controller -# Add files into working directory -COPY --chown=teraflow:teraflow src/context/. context -COPY --chown=teraflow:teraflow src/monitoring/. monitoring -COPY --chown=teraflow:teraflow src/dbscanserving/. dbscanserving -COPY --chown=teraflow:teraflow src/opticalattackdetector/. opticalattackdetector -COPY --chown=teraflow:teraflow src/opticalattackmitigator/. opticalattackmitigator -COPY --chown=teraflow:teraflow src/opticalattackmanager/. opticalattackmanager +# Add component files into working directory +COPY --chown=teraflow:teraflow ./src/context/. context +COPY --chown=teraflow:teraflow ./src/monitoring/. monitoring +COPY --chown=teraflow:teraflow ./src/dbscanserving/. dbscanserving +COPY --chown=teraflow:teraflow ./src/opticalattackdetector/. opticalattackdetector +COPY --chown=teraflow:teraflow ./src/opticalattackmitigator/. opticalattackmitigator +COPY --chown=teraflow:teraflow ./src/opticalattackmanager/. 
opticalattackmanager -# Start opticalattackmanager service +# Start the service ENTRYPOINT ["python", "-m", "opticalattackmanager.service"] diff --git a/src/opticalattackmanager/service/__main__.py b/src/opticalattackmanager/service/__main__.py index 7c3c8b0526f1d8b06b92143ad43635d560b5153b..af38e02a0c4a3098ce9684822654a2494611661d 100644 --- a/src/opticalattackmanager/service/__main__.py +++ b/src/opticalattackmanager/service/__main__.py @@ -44,6 +44,7 @@ from common.Settings import ( get_log_level, get_metrics_port, get_service_host, + get_service_port_grpc, get_setting, wait_for_environment_variables, ) @@ -254,7 +255,7 @@ def get_number_workers( async def monitor_services(terminate, service_list=None, cache=None): host = get_service_host(ServiceNameEnum.OPTICALATTACKDETECTOR) - port = get_metrics_port(ServiceNameEnum.OPTICALATTACKDETECTOR) + port = get_service_port_grpc(ServiceNameEnum.OPTICALATTACKDETECTOR) cur_number_workers = MIN_NUMBER_WORKERS desired_monitoring_interval = 30 # defaults to 30 seconds diff --git a/src/opticalattackmitigator/Dockerfile b/src/opticalattackmitigator/Dockerfile index 2efb2f4d25d301b47fb9ac87faf6bbc6366f8d48..e364cbee121c38570c8158946486ec38dcd8b12d 100644 --- a/src/opticalattackmitigator/Dockerfile +++ b/src/opticalattackmitigator/Dockerfile @@ -54,8 +54,10 @@ RUN python3 -m pip install -r common_requirements.txt # Add common files into working directory WORKDIR /home/teraflow/controller/common COPY --chown=teraflow:teraflow src/common/. 
./ +RUN rm -rf proto # Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /home/teraflow/controller/common/proto WORKDIR /home/teraflow/controller/common/proto RUN touch __init__.py COPY --chown=teraflow:teraflow proto/*.proto ./ @@ -68,13 +70,13 @@ RUN mkdir -p /home/teraflow/controller/opticalattackmitigator WORKDIR /home/teraflow/controller # Get Python packages per module -COPY --chown=teraflow:teraflow src/opticalattackmitigator/requirements.in opticalattackmitigator/requirements.in +COPY --chown=teraflow:teraflow ./src/opticalattackmitigator/requirements.in opticalattackmitigator/requirements.in # consider common and specific requirements to avoid inconsistencies with dependencies RUN pip-compile --quiet --output-file=opticalattackmitigator/requirements.txt opticalattackmitigator/requirements.in common_requirements.in RUN python3 -m pip install -r opticalattackmitigator/requirements.txt -# Add files into working directory -COPY --chown=teraflow:teraflow src/opticalattackmitigator/. opticalattackmitigator +# Add component files into working directory +COPY --chown=teraflow:teraflow ./src/opticalattackmitigator/. 
opticalattackmitigator -# Start opticalattackmitigator service +# Start the service ENTRYPOINT ["python", "-m", "opticalattackmitigator.service"] diff --git a/src/pathcomp/.gitignore b/src/pathcomp/.gitignore index 48a680bf0f45eb556108c349bc45be896f4219aa..82fc0ca316f8863947b66c3f294444161902e79c 100644 --- a/src/pathcomp/.gitignore +++ b/src/pathcomp/.gitignore @@ -1 +1,4 @@ backend/wireshark +backend/*.o +backend/pathComp +backend/pathComp-dbg diff --git a/src/pathcomp/backend/pathComp_RESTapi.c b/src/pathcomp/backend/pathComp_RESTapi.c index 1780cfde2039b5907ab0f5696885e17deb56644c..5e22136e7dd753cdf2a7f4a66289be740accb2a7 100644 --- a/src/pathcomp/backend/pathComp_RESTapi.c +++ b/src/pathcomp/backend/pathComp_RESTapi.c @@ -281,7 +281,6 @@ void add_comp_path_deviceId_endpointId_json(cJSON* pathObj, struct path_t* p, st return; } - //////////////////////////////////////////////////////////////////////////////////////// /** * @file pathComp_RESTapi.c @@ -812,12 +811,18 @@ void parsing_json_serviceList_array(cJSON* serviceArray) { parse_service_constraints(constraintArray, service); } - // Get the maximum number of paths to be computed (kPaths) - cJSON* kPathsObj = cJSON_GetObjectItemCaseSensitive(item, "kPaths"); - if (cJSON_IsNumber(kPathsObj)){ - service->kPaths = (guint)(kPathsObj->valuedouble); + // Get the maximum number of paths to be computed (kPaths) inspected/explored + cJSON* kPathsInspObj = cJSON_GetObjectItemCaseSensitive(item, "kPaths_inspection"); + if (cJSON_IsNumber(kPathsInspObj)){ + service->kPaths_inspected = (guint)(kPathsInspObj->valuedouble); } + // Get the maximum number of paths to be computed (kPaths) returned + cJSON* kPathsRetpObj = cJSON_GetObjectItemCaseSensitive(item, "kPaths_return"); + if (cJSON_IsNumber(kPathsRetpObj)){ + service->kPaths_returned = (guint)(kPathsRetpObj->valuedouble); + } + // Append the requested service to the serviceList serviceList = g_list_append(serviceList, service); } diff --git 
a/src/pathcomp/backend/pathComp_ksp.c b/src/pathcomp/backend/pathComp_ksp.c index 00ebaf5b8b7e0a888720a4092a0d23d75a3eb04b..f5e3c8fb8ea854c9acc9fb9f8bc00e3f34a3141a 100644 --- a/src/pathcomp/backend/pathComp_ksp.c +++ b/src/pathcomp/backend/pathComp_ksp.c @@ -63,15 +63,14 @@ void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) { gint i = 0; for (GList* listnode = g_list_first(serviceList); listnode; - listnode = g_list_next(listnode), i++){ - //struct service_t* service = &(serviceList->services[i]); + listnode = g_list_next(listnode), i++){ struct service_t* service = (struct service_t*)(listnode->data); DEBUG_PC("Starting the Computation for ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId); struct compRouteOutput_t* pathService = &(outputList->compRouteConnection[i]); // check endpoints of the service are different (PE devices/nodes are different) if (same_src_dst_pe_nodeid(service) == 0) { - DEBUG_PC("PEs are the same... no path computation"); + DEBUG_PC("PEs are the same... 
NO PATH COMPUTATION"); comp_route_connection_issue_handler(pathService, service); outputList->numCompRouteConnList++; continue; @@ -84,6 +83,7 @@ void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) { outputList->numCompRouteConnList++; continue; } + alg_comp(service, pathService, g, NO_OPTIMIZATION_ARGUMENT); // last parameter 0 is related to an optimization computation argument outputList->numCompRouteConnList++; @@ -92,7 +92,8 @@ void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) { if (pathService->noPathIssue == NO_PATH_CONS_ISSUE) { continue; } - struct path_t* path = &(pathService->paths[pathService->numPaths - 1]); + // Out of the comnputed paths for the pathservice, the first one is chosen to be locally allocated + struct path_t* path = &(pathService->paths[0]); allocate_graph_resources(path, service, g); allocate_graph_reverse_resources(path, service, g); print_graph(g); @@ -111,8 +112,7 @@ void ksp_alg_execution_services(struct compRouteOutputList_t* outputList) { * @date 2022 */ ///////////////////////////////////////////////////////////////////////////////////////// -gint pathComp_ksp_alg(struct compRouteOutputList_t * routeConnList) -{ +gint pathComp_ksp_alg(struct compRouteOutputList_t * routeConnList) { g_assert(routeConnList); gint numSuccesPathComp = 0, numPathCompIntents = 0; diff --git a/src/pathcomp/backend/pathComp_sp.c b/src/pathcomp/backend/pathComp_sp.c index 48231b591e66ae0f8161ff14f79e7c9a6d832328..b6fd885e3fec7032993ef4d058df8256cc363965 100644 --- a/src/pathcomp/backend/pathComp_sp.c +++ b/src/pathcomp/backend/pathComp_sp.c @@ -143,7 +143,7 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa DEBUG_PC("Computed Path Avail Bw: %f, Path Cost: %f, latency: %f", p->availCap, p->cost, p->delay); print_path(p); - gboolean feasibleRoute = check_computed_path_feasability(s, p); + gboolean feasibleRoute = check_computed_path_feasibility(s, p); if (feasibleRoute == TRUE) { 
DEBUG_PC("SP Feasible"); print_path(p); @@ -202,37 +202,37 @@ void sp_execution_services(struct compRouteOutputList_t* oPathList) { //struct service_t* service = &(serviceList->services[i]); struct service_t* service = (struct service_t*)(listnode->data); - DEBUG_PC("Starting the Computation for ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId); - struct compRouteOutput_t* pathService = &(oPathList->compRouteConnection[i]); - // check endpoints of the service are different (PE devices/nodes are different) - if (same_src_dst_pe_nodeid(service) == 0) { - DEBUG_PC("PEs are the same... no path computation"); - comp_route_connection_issue_handler(pathService, service); - oPathList->numCompRouteConnList++; - continue; - } - - // get the graph associated to the contextId in the contextSet, if no then error - struct graph_t* g = get_graph_by_contextId(contextSet, service->serviceId.contextId); - if (g == NULL) { - DEBUG_PC("The targeted contextId is NOT in the ContextSet ... 
then NO graph"); - comp_route_connection_issue_handler(pathService, service); - oPathList->numCompRouteConnList++; - continue; - } - - computation_shortest_path(service, pathService, g); - oPathList->numCompRouteConnList++; - - // for each network connectivity service, a single computed path (out of the KCSP) is retuned - // If path is found, then the selected resources must be pre-assigned into the context information - if (pathService->noPathIssue == NO_PATH_CONS_ISSUE) { - continue; - } - struct path_t* path = &(pathService->paths[pathService->numPaths - 1]); - //allocate_graph_resources(path, service, g); // LGR: crashes in some cases with assymetric topos - //allocate_graph_reverse_resources(path, service, g); // LGR: crashes in some cases with assymetric topos - print_graph(g); + DEBUG_PC("Starting the Computation for ServiceId: %s [ContextId: %s]", service->serviceId.service_uuid, service->serviceId.contextId); + struct compRouteOutput_t* pathService = &(oPathList->compRouteConnection[i]); + // check endpoints of the service are different (PE devices/nodes are different) + if (same_src_dst_pe_nodeid(service) == 0) { + DEBUG_PC("PEs are the same... no path computation"); + comp_route_connection_issue_handler(pathService, service); + oPathList->numCompRouteConnList++; + continue; + } + + // get the graph associated to the contextId in the contextSet, if no then error + struct graph_t* g = get_graph_by_contextId(contextSet, service->serviceId.contextId); + if (g == NULL) { + DEBUG_PC("The targeted contextId is NOT in the ContextSet ... 
then NO graph"); + comp_route_connection_issue_handler(pathService, service); + oPathList->numCompRouteConnList++; + continue; + } + + computation_shortest_path(service, pathService, g); + oPathList->numCompRouteConnList++; + + // for each network connectivity service, a single computed path (out of the KCSP) is retuned + // If path is found, then the selected resources must be pre-assigned into the context information + if (pathService->noPathIssue == NO_PATH_CONS_ISSUE) { + continue; + } + struct path_t* path = &(pathService->paths[pathService->numPaths - 1]); + //allocate_graph_resources(path, service, g); // LGR: crashes in some cases with assymetric topos + //allocate_graph_reverse_resources(path, service, g); // LGR: crashes in some cases with assymetric topos + print_graph(g); } return; } diff --git a/src/pathcomp/backend/pathComp_tools.c b/src/pathcomp/backend/pathComp_tools.c index e7b91ee9e5a8a0a1c28344d17247e307238ed4c7..bd4f7df8ca460e3bab3bb3d9f75e7592f19268c7 100644 --- a/src/pathcomp/backend/pathComp_tools.c +++ b/src/pathcomp/backend/pathComp_tools.c @@ -1459,8 +1459,7 @@ void modify_targeted_graph (struct graph_t *g, struct path_set_t *A, struct comp * @date 2022 */ ///////////////////////////////////////////////////////////////////////////////////////// -gint find_nodeId (gconstpointer data, gconstpointer userdata) -{ +gint find_nodeId (gconstpointer data, gconstpointer userdata) { /** check values */ g_assert(data != NULL); g_assert(userdata != NULL); @@ -1470,8 +1469,7 @@ gint find_nodeId (gconstpointer data, gconstpointer userdata) //DEBUG_PC ("SNodeId (%s) nodeId (%s)", SNodeId->node.nodeId, nodeId); - if (!memcmp(SNodeId->node.nodeId, nodeId, strlen (SNodeId->node.nodeId))) - { + if (!memcmp(SNodeId->node.nodeId, nodeId, strlen (SNodeId->node.nodeId))) { return (0); } return -1; @@ -1500,13 +1498,13 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc g_assert(g); g_assert(s); g_assert(mapNodes); struct 
targetNodes_t *v = &(g->vertices[indexGraphU].targetedVertices[indexGraphV]); - DEBUG_PC("Explored Link %s => %s)", u->node.nodeId, v->tVertice.nodeId); + DEBUG_PC("=======================CHECK Edge %s => %s =================================", u->node.nodeId, v->tVertice.nodeId); //DEBUG_PC("\t %s => %s", u->node.nodeId, v->tVertice.nodeId); // v already explored in S? then, discard it GList *found = g_list_find_custom (*S, v->tVertice.nodeId, find_nodeId); if (found != NULL) { - DEBUG_PC ("v (%s) in S, Discard", v->tVertice.nodeId); + DEBUG_PC ("%s in S, DISCARD", v->tVertice.nodeId); return 0; } @@ -1523,10 +1521,11 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc DEBUG_PC("EDGE %s[%s] => %s[%s]", u->node.nodeId, e->aEndPointId, v->tVertice.nodeId, e->zEndPointId); //DEBUG_PC ("\t %s[%s] =>", u->node.nodeId, e->aEndPointId); //DEBUG_PC("\t => %s[%s]", v->tVertice.nodeId, e->zEndPointId); - DEBUG_PC("\t AvailBw: %f, TotalBw: %f", edgeAvailBw, edgeTotalBw); + DEBUG_PC("\t Edge Att: AvailBw: %f, TotalBw: %f", edgeAvailBw, edgeTotalBw); // Check Service Bw constraint - if ((path_constraints->bw == TRUE) && (edgeAvailBw < path_constraints->bwConstraint)) + if ((path_constraints->bw == TRUE) && (edgeAvailBw < path_constraints->bwConstraint)) { continue; + } else { foundAvailBw = 1; break; @@ -1534,7 +1533,7 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc } // BW constraint NOT MET, then DISCARD edge if ((path_constraints->bw == TRUE) && (foundAvailBw == 0)) { - DEBUG_PC ("AvailBw: %f < path_constraint: %f -- Discard Edge", edgeAvailBw, path_constraints->bwConstraint); + DEBUG_PC ("Edge AvailBw: %f < path_constraint: %f -- DISCARD Edge", edgeAvailBw, path_constraints->bwConstraint); g_free(path_constraints); return 0; } @@ -1580,13 +1579,14 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc if (arg & ENERGY_EFFICIENT_ARGUMENT) { if (distance_through_u == 
v_map->distance) { if (power_through_u > v_map->power) { - DEBUG_PC("Energy (src -> u + u -> v: %f (Watts) >Energy (src, v): %f (Watts)--> DISCARD LINK", power_through_u, v_map->power); + DEBUG_PC("Energy (src -> u + u -> v: %f (Watts) > Energy (src, v): %f (Watts) --> DISCARD EDGE", power_through_u, v_map->power); return 0; } // same energy consumption, consider latency if ((power_through_u == v_map->power) && (latency_through_u > v_map->latency)) { return 0; } + // same energy, same latency, criteria: choose the one having the largest available bw if ((power_through_u == v_map->power) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) { return 0; } @@ -1603,8 +1603,9 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc return 0; } } - DEBUG_PC ("%s --> %s Relaxed", u->node.nodeId, v->tVertice.nodeId); - DEBUG_PC ("\t AvailBw: %f Mb/s, Cost: %f, Latency: %f ms, Energy: %f Watts", availBw_through_u, distance_through_u, latency_through_u, power_through_u); + DEBUG_PC ("Edge %s --> %s [RELAXED]", u->node.nodeId, v->tVertice.nodeId); + DEBUG_PC ("\t path till %s: AvailBw: %f Mb/s | Cost: %f | Latency: %f ms | Energy: %f Watts", v->tVertice.nodeId, availBw_through_u, distance_through_u, + latency_through_u, power_through_u); // Update Q list -- struct nodeItem_t *nodeItem = g_malloc0 (sizeof (struct nodeItem_t)); @@ -1621,8 +1622,9 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc if (arg & ENERGY_EFFICIENT_ARGUMENT) { *Q = g_list_insert_sorted(*Q, nodeItem, sort_by_energy); } - else + else { *Q = g_list_insert_sorted (*Q, nodeItem, sort_by_distance); + } // Update the mapNodes for the specific reached tv v_map->distance = distance_through_u; @@ -1634,9 +1636,9 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc struct edges_t *e1 = &(v_map->predecessor); struct edges_t *e2 = &(v->edges[indexEdge]); duplicate_edge(e1, e2); - DEBUG_PC 
("u->v Edge: %s(%s) --> %s(%s)", e2->aNodeId.nodeId, e2->aEndPointId, e2->zNodeId.nodeId, e2->zEndPointId); + //DEBUG_PC ("u->v Edge: %s(%s) --> %s(%s)", e2->aNodeId.nodeId, e2->aEndPointId, e2->zNodeId.nodeId, e2->zEndPointId); //DEBUG_PC("v-pred aTopology: %s", e2->aTopologyId); - DEBUG_PC("v-pred zTopology: %s", e2->zTopologyId); + //DEBUG_PC("v-pred zTopology: %s", e2->zTopologyId); // Check whether v is dstPEId //DEBUG_PC ("Targeted dstId: %s", s->service_endpoints_id[1].device_uuid); @@ -1658,7 +1660,7 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc * @date 2022 */ ///////////////////////////////////////////////////////////////////////////////////////// -gboolean check_computed_path_feasability (struct service_t *s, struct compRouteOutputItem_t* p) { +gboolean check_computed_path_feasibility (struct service_t *s, struct compRouteOutputItem_t* p) { float epsilon = 0.0000001; struct path_constraints_t* pathCons = get_path_constraints(s); gboolean ret = TRUE; @@ -2345,7 +2347,7 @@ void build_contextSet_linklList(GList** set, gint activeFlag) { // for each link in linkList: // 1st- Retrieve endpoints A --> B feauture (context Id, device Id, endpoint Id) // 2st - In the graph associated to the contextId, check wheter A (deviceId) is in the vertices list - // o No, this is weird ... exist + // o No, this is weird ... exit // o Yes, get the other link endpoint (i.e., B) and check whether it exists. 
If NOT add it, considering // all the attributes; Otherwise, check whether the link is different from existing edges between A and B gdouble epsilon = 0.1; @@ -3064,7 +3066,7 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv // if ingress of the root link (aNodeId) is the spurNode, then stops if (compare_node_id(&re->aNodeId, SN) == 0) { - DEBUG_PC("root Link: aNodeId: %s and spurNode: %s -- stop exploring the rootPath (RP)", re->aNodeId.nodeId, SN->nodeId); + DEBUG_PC("Ingress Node rootLink %s = spurNode %s; STOP exploring rootPath (RP)", re->aNodeId.nodeId, SN->nodeId); break; } // Extract from Q @@ -3072,7 +3074,6 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data); Q = g_list_remove(Q, node); - //DEBUG_RL_RA ("Exploring node %s", node->node.nodeId); indexVertice = graph_vertice_lookup(node->node.nodeId, g); g_assert(indexVertice >= 0); @@ -3086,22 +3087,21 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv } // Check that the first node in Q set is SpurNode, otherwise something went wrong ... 
if (compare_node_id(&re->aNodeId, SN) != 0) { - //DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId); + DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId); g_list_free_full(g_steal_pointer(&S), g_free); g_list_free_full(g_steal_pointer(&Q), g_free); return; } } - while (g_list_length(Q) > 0) { //Extract from Q set GList* listnode = g_list_first(Q); struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data); Q = g_list_remove(Q, node); DEBUG_PC("Q length: %d", g_list_length(Q)); - DEBUG_PC("DeviceId: %s", node->node.nodeId); + DEBUG_PC("Explored DeviceId: %s", node->node.nodeId); - // visit all the links from u within the graph + // scan all the links from u within the graph indexVertice = graph_vertice_lookup(node->node.nodeId, g); g_assert(indexVertice >= 0); @@ -3139,18 +3139,19 @@ gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s, struct map_nodes_t* mapNodes, guint arg) { g_assert(pred); g_assert(g); g_assert(s); - DEBUG_PC("Source: %s -- Destination: %s", s->service_endpoints_id[0].device_uuid, s->service_endpoints_id[1].device_uuid); + DEBUG_PC("SOURCE: %s --> DESTINATION: %s", s->service_endpoints_id[0].device_uuid, + s->service_endpoints_id[1].device_uuid); // Check the both ingress src and dst endpoints are in the graph gint srcMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[0].device_uuid, mapNodes); if (srcMapIndex == -1) { - DEBUG_PC("ingress DeviceId: %s NOT in the graph", s->service_endpoints_id[0].device_uuid); + DEBUG_PC("ingress DeviceId: %s NOT in G", s->service_endpoints_id[0].device_uuid); return -1; } gint dstMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes); if (dstMapIndex == -1) { - DEBUG_PC("egress DeviceId: %s NOT in the graph", s->service_endpoints_id[1].device_uuid); + DEBUG_PC("egress DeviceId: %s NOT in G", 
s->service_endpoints_id[1].device_uuid); return -1; } @@ -3164,17 +3165,17 @@ gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s, gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes); struct map_t* dest_map = &mapNodes->map[map_dstIndex]; if (!(dest_map->distance < INFINITY_COST)) { - DEBUG_PC("destination: %s NOT reachable", s->service_endpoints_id[1].device_uuid); + DEBUG_PC("DESTINATION: %s NOT reachable", s->service_endpoints_id[1].device_uuid); return -1; } DEBUG_PC("AvailBw @ %s is %f", dest_map->verticeId.nodeId, dest_map->avaiBandwidth); // Check that the computed available bandwidth is larger than 0.0 if (dest_map->avaiBandwidth <= (gfloat)0.0) { - DEBUG_PC("dst: %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid); + DEBUG_PC("DESTINATION %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid); return -1; } - DEBUG_PC("dst: %s REACHABLE", s->service_endpoints_id[1].device_uuid); + DEBUG_PC("DESTINATION %s REACHABLE", s->service_endpoints_id[1].device_uuid); // Handle predecessors build_predecessors(pred, s, mapNodes); return 1; @@ -3219,6 +3220,18 @@ void set_path_attributes(struct compRouteOutputItem_t* p, struct map_t* mapV) { void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_t* g, guint arg) { g_assert(s); g_assert(path); g_assert(g); + // Check if the service specifies a nuumber of K paths to be explored/computed for the + // service. 
If not, compute that number; otherwise set the max number of explored + // computed paths to MAX_KSP_VALUE + guint maxK = 0; + if(s->kPaths_inspected == 0) { + maxK = MAX_KSP_VALUE; + } + else { + maxK = s->kPaths_inspected; + } + DEBUG_PC("The KSP considers K: %d", maxK); + // create map of devices/nodes to handle the path computation using the context struct map_nodes_t* mapNodes = create_map_node(); build_map_node(mapNodes, g); @@ -3228,6 +3241,9 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_ struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[0]); struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]); + DEBUG_PC("======================================================================================="); + DEBUG_PC("STARTING PATH COMP FOR %s[%s] --> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid); + // Compute the 1st KSP path gint done = ksp_comp(predecessors, g, s, NULL, NULL, mapNodes, arg); if (done == -1) { @@ -3260,14 +3276,14 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_ } path->num_service_endpoints_id = s->num_service_endpoints_id; - DEBUG_PC("COMPUTE UP TO K Feasible Paths A[%d]", MAX_KSP_VALUE); + DEBUG_PC("COMPUTE UP TO K Feasible Paths A[%d]", maxK); // Create A and B sets of paths to handle the YEN algorithm struct path_set_t *A = create_path_set(), *B = create_path_set(); // Add 1st Computed path into A->paths[0] duplicate_path(p, &A->paths[0]); A->numPaths++; g_free(predecessors); g_free(p); - for (gint k = 1; k < MAX_KSP_VALUE; k++) { + for (gint k = 1; k < maxK; k++) { DEBUG_PC("*************************** kth (%d) ***********************************", k); struct compRouteOutputItem_t* p = create_path_item(); duplicate_path(&A->paths[k - 1], p); @@ -3359,9 +3375,10 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_ // copy the service endpoints, in general, there will be 2 
(point-to-point network connectivity services) for (gint m = 0; m < s->num_service_endpoints_id; m++) { struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[m]); - struct service_endpoints_id_t* oEp = &(s->service_endpoints_id[m]); + struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[m]); copy_service_endpoint_id(oEp, iEp); } + path->num_service_endpoints_id = s->num_service_endpoints_id; // Print all the paths i A for (gint h = 0; h < A->numPaths; h++) { @@ -3371,27 +3388,33 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_ DEBUG_PC("Number of paths: %d", path->numPaths); // For all the computed paths in A, pick the one being feasible wrt the service constraints for (gint ksp = 0; ksp < A->numPaths; ksp++) { - if (ksp >= MAX_KSP_VALUE) { - DEBUG_PC("Number Requested paths (%d) REACHED - STOP", ksp); + if (ksp >= s->kPaths_returned) { + DEBUG_PC("Number Requested/returned paths (%d) REACHED - STOP", ksp); break; } - gdouble feasibleRoute = check_computed_path_feasability(s, &A->paths[ksp]); + gdouble feasibleRoute = check_computed_path_feasibility(s, &A->paths[ksp]); if (feasibleRoute == TRUE) { - DEBUG_PC("A[%d] available: %f, pathCost: %f; latency: %f, Power: %f", ksp, A->paths[ksp].availCap, A->paths[ksp].cost, A->paths[ksp].delay, A->paths[ksp].power); + DEBUG_PC("A[%d] available: %f, pathCost: %f; latency: %f, Power: %f", ksp, A->paths[ksp].availCap, + A->paths[ksp].cost, A->paths[ksp].delay, A->paths[ksp].power); struct compRouteOutputItem_t* pathaux = &A->paths[ksp]; path->numPaths++; struct path_t* targetedPath = &path->paths[path->numPaths - 1]; duplicate_path_t(pathaux, targetedPath); print_path_t(targetedPath); - remove_path_set(A); - remove_path_set(B); - return; + //remove_path_set(A); + //remove_path_set(B); + //return; } } remove_path_set(A); remove_path_set(B); + // At least 1 out (K) paths was found, then K-SP succeded + if (path->numPaths > 0) { + DEBUG_PC("K-SP succeeded"); + return; + } 
// No paths found --> Issue DEBUG_PC("K-SP failed!!!"); comp_route_connection_issue_handler(path, s); return; -} \ No newline at end of file +} diff --git a/src/pathcomp/backend/pathComp_tools.h b/src/pathcomp/backend/pathComp_tools.h index 84334eb5e1d47199e8a71bb09c3b541625d66af2..49280d543a52ede118a1baee988484671bc76f8d 100644 --- a/src/pathcomp/backend/pathComp_tools.h +++ b/src/pathcomp/backend/pathComp_tools.h @@ -38,7 +38,7 @@ extern GList* activeServList; #define INFINITY_COST 0xFFFFFFFF #define MAX_NUM_PRED 100 -#define MAX_KSP_VALUE 3 +#define MAX_KSP_VALUE 5 // HTTP RETURN CODES #define HTTP_CODE_OK 200 @@ -126,6 +126,7 @@ struct map_nodes_t { #define MAX_NUM_VERTICES 100 // 100 # LGR: reduced from 100 to 20 to divide by 5 the memory used #define MAX_NUM_EDGES 5 // 100 # LGR: reduced from 100 to 5 to divide by 20 the memory used + // Structures for the graph composition struct targetNodes_t { // remote / targeted node @@ -352,11 +353,10 @@ struct constraint_t { #define MAX_NUM_SERVICE_ENPOINTS_ID 2 #define MAX_NUM_SERVICE_CONSTRAINTS 10 -struct service_t { - // Indentifier used to determine the used Algorithm Id, e.g., KSP - gchar algId[MAX_ALG_ID_LENGTH]; - // PATHS expected for the output - guint kPaths; +struct service_t { + gchar algId[MAX_ALG_ID_LENGTH]; // Indentifier used to determine the used Algorithm Id, e.g., KSP + guint kPaths_inspected; // PATHS expected to be inspected + guint kPaths_returned; // Maximum number of PATHS to be returned struct serviceId_t serviceId; guint service_type; // unknown, l2nm, l3nm, tapi @@ -430,7 +430,7 @@ struct pathLink_t { gchar zEndPointId[UUID_CHAR_LENGTH]; struct topology_id_t topologyId; - struct linkTopology_t linkTopologies[2]; // a p2p link (at most) can connect to devices (endpoints) attached to 2 different topologies + struct linkTopology_t linkTopologies[2]; // A p2p link (at most) can connect to devices (endpoints) attached to 2 different topologies gint numLinkTopologies; }; @@ -575,7 +575,7 @@ 
gboolean matching_path_rootPath (struct compRouteOutputItem_t *, struct compRout void modify_targeted_graph (struct graph_t *, struct path_set_t *, struct compRouteOutputItem_t *, struct nodes_t *); gint find_nodeId (gconstpointer, gconstpointer); gint check_link (struct nodeItem_t *, gint, gint, struct graph_t *, struct service_t *, GList **, GList **, struct map_nodes_t *, guint arg); -gboolean check_computed_path_feasability (struct service_t *, struct compRouteOutputItem_t * ); +gboolean check_computed_path_feasibility (struct service_t *, struct compRouteOutputItem_t * ); gint sort_by_distance (gconstpointer, gconstpointer); gint sort_by_energy(gconstpointer, gconstpointer); @@ -617,4 +617,4 @@ void destroy_context(struct context_t*); void dijkstra(gint, gint, struct graph_t*, struct service_t*, struct map_nodes_t*, struct nodes_t*, struct compRouteOutputItem_t*, guint); void set_path_attributes(struct compRouteOutputItem_t*, struct map_t*); void alg_comp(struct service_t*, struct compRouteOutput_t*, struct graph_t*, guint); -#endif \ No newline at end of file +#endif diff --git a/src/pathcomp/frontend/service/__main__.py b/src/pathcomp/frontend/service/__main__.py index e3f7d36196be4319cf8364d8569d5289bec2dd89..00da647752e870acc132fc8a96e506ef7327ffa3 100644 --- a/src/pathcomp/frontend/service/__main__.py +++ b/src/pathcomp/frontend/service/__main__.py @@ -53,7 +53,7 @@ def main(): grpc_service.start() # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass + while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() diff --git a/src/pathcomp/frontend/service/algorithms/KShortestPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KShortestPathAlgorithm.py index 920d72e828f6f84bc064f1c7357105907ffdac4c..e0fbbe08a1c01402573333f89b1118b6618cc7ce 100644 --- a/src/pathcomp/frontend/service/algorithms/KShortestPathAlgorithm.py +++ 
b/src/pathcomp/frontend/service/algorithms/KShortestPathAlgorithm.py @@ -26,4 +26,5 @@ class KShortestPathAlgorithm(_Algorithm): for service_request in self.service_list: service_request['algId' ] = self.algorithm_id service_request['syncPaths'] = self.sync_paths - service_request['kPaths' ] = self.k_return + service_request['kPaths_inspection'] = self.k_inspection + service_request['kPaths_return' ] = self.k_return diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index b486ec1b59457b1ac575fb6197c7713b10c306e3..0eb01c1341421e379850f89d5671d9156c8a9fd6 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -253,6 +253,7 @@ class _Algorithm: for connection in connections: connection_uuid,service_type,path_hops,_ = connection service_key = (context_uuid, connection_uuid) + if service_key in grpc_services: continue grpc_service = self.add_service_to_reply( reply, context_uuid, connection_uuid, service_type, path_hops=path_hops, config_rules=orig_config_rules) @@ -265,10 +266,9 @@ class _Algorithm: grpc_service = grpc_services.get(service_key) if grpc_service is None: raise Exception('Service({:s}) not found'.format(str(service_key))) - grpc_connection = grpc_connections.get(connection_uuid) - if grpc_connection is not None: continue + #if connection_uuid in grpc_connections: continue grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops) - grpc_connections[connection_uuid] = grpc_connection + #grpc_connections[connection_uuid] = grpc_connection for sub_service_uuid in dependencies: sub_service_key = (context_uuid, sub_service_uuid) @@ -281,11 +281,11 @@ class _Algorithm: # ... "path-capacity": {"total-size": {"value": 200, "unit": 0}}, # ... "path-latency": {"fixed-latency-characteristic": "10.000000"}, # ... 
"path-cost": {"cost-name": "", "cost-value": "5.000000", "cost-algorithm": "0.000000"}, - #path_capacity = service_path['path-capacity']['total-size'] + #path_capacity = service_path_ero['path-capacity']['total-size'] #path_capacity_value = path_capacity['value'] #path_capacity_unit = CapacityUnit(path_capacity['unit']) - #path_latency = service_path['path-latency']['fixed-latency-characteristic'] - #path_cost = service_path['path-cost'] + #path_latency = service_path_ero['path-latency']['fixed-latency-characteristic'] + #path_cost = service_path_ero['path-cost'] #path_cost_name = path_cost['cost-name'] #path_cost_value = path_cost['cost-value'] #path_cost_algorithm = path_cost['cost-algorithm'] diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index e56d436dd006197497d7774be598a480a134320c..c1591dbeb7c71c950135b92446849569bcd781f8 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -36,8 +36,8 @@ DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.EMULATED_XR_CONSTELLATION.value : 40, DeviceTypeEnum.XR_CONSTELLATION.value : 40, - DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 30, - DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value : 30, + DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 40, + DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value : 40, DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value : 30, DeviceTypeEnum.OPEN_LINE_SYSTEM.value : 30, @@ -57,11 +57,13 @@ IGNORED_DEVICE_TYPES = {DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER} def get_device_controller_uuid( device : Device ) -> Optional[str]: - for config_rule in device.device_config.config_rules: - if config_rule.WhichOneof('config_rule') != 'custom': continue - if config_rule.custom.resource_key != '_controller': continue - device_controller_id = json.loads(config_rule.custom.resource_value) - return 
device_controller_id['uuid'] + controller_uuid = device.controller_id.device_uuid.uuid + if len(controller_uuid) > 0: return controller_uuid + #for config_rule in device.device_config.config_rules: + # if config_rule.WhichOneof('config_rule') != 'custom': continue + # if config_rule.custom.resource_key != '_controller': continue + # device_controller_id = json.loads(config_rule.custom.resource_value) + # return device_controller_id['uuid'] return None def _map_device_type(device : Device) -> DeviceTypeEnum: diff --git a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java index 967d1d6e604e312fe9d8314beea023f902af776b..52d594ea4200c2ce4d775edf2f06cf7a9c9f9097 100644 --- a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java +++ b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java @@ -1124,8 +1124,12 @@ public class Serializer { return ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_PLANNED; case PENDING_REMOVAL: return ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL; + case SLA_VIOLATED: + return ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_SLA_VIOLATED; case UNDEFINED: return ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_UNDEFINED; + case UPDATING: + return ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_UPDATING; default: return ContextOuterClass.ServiceStatusEnum.UNRECOGNIZED; } @@ -1140,6 +1144,10 @@ public class Serializer { return ServiceStatusEnum.PLANNED; case SERVICESTATUS_PENDING_REMOVAL: return ServiceStatusEnum.PENDING_REMOVAL; + case SERVICESTATUS_SLA_VIOLATED: + return ServiceStatusEnum.SLA_VIOLATED; + case SERVICESTATUS_UPDATING: + return ServiceStatusEnum.UPDATING; case SERVICESTATUS_UNDEFINED: case UNRECOGNIZED: default: diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java index 
b0fb90864ce32bf6b793dded5d1f9de1dfba5097..f06c30204b874cd6be30cd1a906c5087412e9640 100644 --- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java +++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java @@ -1910,6 +1910,12 @@ class SerializerTest { Arguments.of( ServiceStatusEnum.PENDING_REMOVAL, ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL), + Arguments.of( + ServiceStatusEnum.SLA_VIOLATED, + ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_SLA_VIOLATED), + Arguments.of( + ServiceStatusEnum.UPDATING, + ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_UPDATING), Arguments.of( ServiceStatusEnum.UNDEFINED, ContextOuterClass.ServiceStatusEnum.SERVICESTATUS_UNDEFINED)); diff --git a/src/service/client/ServiceClient.py b/src/service/client/ServiceClient.py index 30ff4f4838dd52d7010f08a7814ff208afbe92f4..e8ea478a3109d3e006120db9f22966724773b78b 100644 --- a/src/service/client/ServiceClient.py +++ b/src/service/client/ServiceClient.py @@ -65,3 +65,10 @@ class ServiceClient: response = self.stub.DeleteService(request) LOGGER.debug('DeleteService result: {:s}'.format(grpc_message_to_json_string(response))) return response + + @RETRY_DECORATOR + def RecomputeConnections(self, request : Service) -> Empty: + LOGGER.debug('RecomputeConnections request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecomputeConnections(request) + LOGGER.debug('RecomputeConnections result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 6531376b84732b1ec80e335cfc6cd816be944b0a..6d23fd4cee53d1639c9eefbd943d45dab497b253 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -12,17 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
    def RecomputeConnections(self, request : Service, context : grpc.ServicerContext) -> Empty:
        """Recompute and re-deploy the connection of an existing service.

        Marks the service as SERVICESTATUS_UPDATING, asks PathComp for alternative
        paths (k-shortest-path), randomly picks a candidate whose hop sequence
        differs from the currently-active connection, and schedules the tasks that
        deconfigure the old connection, configure the new one, and re-activate the
        service.

        Args:
            request: Service to recompute; only service_id is used. Endpoints,
                constraints and config rules must be empty (updating them is not
                implemented yet).
            context: gRPC servicer context.

        Returns:
            Empty on success.

        Raises:
            NotImplementedException: endpoints/constraints/config rules were
                provided, the service has 0 or >1 connections, or the connection
                has sub-services.
            NotFoundException: the service does not exist.
            InvalidArgumentException: the stored service type is UNKNOWN.
            OperationFailedException: no alternative path different from the
                current one was found.
        """
        # Updating endpoints/constraints/config rules through this RPC is not
        # supported yet; reject requests that carry them.
        if len(request.service_endpoint_ids) > 0:
            raise NotImplementedException('update-endpoints')

        if len(request.service_constraints) > 0:
            raise NotImplementedException('update-constraints')

        if len(request.service_config.config_rules) > 0:
            raise NotImplementedException('update-config-rules')

        context_client = ContextClient()

        # Retrieve a read-write copy of the service; skip heavy sub-objects since
        # only the status is changed at this point.
        updated_service : Optional[Service] = get_service_by_id(
            context_client, request.service_id, rw_copy=True,
            include_config_rules=False, include_constraints=False, include_endpoint_ids=False)

        if updated_service is None:
            raise NotFoundException('service', request.service_id.service_uuid.uuid)

        # pylint: disable=no-member
        if updated_service.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN:
            raise InvalidArgumentException(
                'request.service_type', ServiceTypeEnum.Name(updated_service.service_type)
            )

        # Set service status to "SERVICESTATUS_UPDATING" to ensure rest of components are aware the service is
        # being modified.
        # pylint: disable=no-member
        updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING

        # Update endpoints (disabled until endpoint updates are supported)
        # pylint: disable=no-member
        #del updated_service.service_endpoint_ids[:]
        #updated_service.service_endpoint_ids.extend(request.service_endpoint_ids)

        # Update constraints (disabled until constraint updates are supported)
        # pylint: disable=no-member
        #del updated_service.service_constraints[:]
        #updated_service.service_constraints.extend(request.service_constraints)

        # Update config rules (disabled until config-rule updates are supported)
        # pylint: disable=no-member
        #del updated_service.service_config.config_rules[:]
        #updated_service.service_config.config_rules.extend(request.service_config.config_rules)

        updated_service_id_with_uuids = context_client.SetService(updated_service)

        # PathComp requires endpoints, constraints and config rules
        updated_service_with_uuids = get_service_by_id(
            context_client, updated_service_id_with_uuids, rw_copy=True,
            include_config_rules=True, include_constraints=True, include_endpoint_ids=True)

        # Get active connection; exactly one connection without sub-services is supported.
        connections = context_client.ListConnections(updated_service_id_with_uuids)
        if len(connections.connections) == 0:
            MSG = 'Service({:s}) has no connections'
            str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids)
            str_extra_details = MSG.format(str_service_id)
            raise NotImplementedException('service-with-no-connections', extra_details=str_extra_details)
        if len(connections.connections) > 1:
            MSG = 'Service({:s}) has multiple ({:d}) connections({:s})'
            str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids)
            num_connections = len(connections.connections)
            str_connections = grpc_message_to_json_string(connections)
            str_extra_details = MSG.format(str_service_id, num_connections, str_connections)
            raise NotImplementedException('service-with-multiple-connections', extra_details=str_extra_details)

        old_connection = connections.connections[0]
        if len(old_connection.sub_service_ids) > 0:
            MSG = 'Service({:s})/Connection({:s}) has sub-services: {:s}'
            str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids)
            str_connection_id = grpc_message_to_json_string(old_connection.connection_id)
            str_connection = grpc_message_to_json_string(old_connection)
            str_extra_details = MSG.format(str_service_id, str_connection_id, str_connection)
            raise NotImplementedException('service-connection-with-subservices', extra_details=str_extra_details)

        # Find alternative connections via k-shortest-path.
        # NOTE(review): k_disjoint_path is left commented out; subsequent error
        # messages still say "KDisjointPath" — confirm intended wording.
        # pylint: disable=no-member
        pathcomp_request = PathCompRequest()
        pathcomp_request.services.append(updated_service_with_uuids)
        #pathcomp_request.k_disjoint_path.num_disjoint = 100
        pathcomp_request.k_shortest_path.k_inspection = 100
        pathcomp_request.k_shortest_path.k_return = 3

        LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request)))
        pathcomp = PathCompClient()
        pathcomp_reply = pathcomp.Compute(pathcomp_request)
        pathcomp.close()
        LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply)))

        if len(pathcomp_reply.services) == 0:
            MSG = 'KDisjointPath reported no services for Service({:s}): {:s}'
            str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids)
            str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply)
            str_extra_details = MSG.format(str_service_id, str_pathcomp_reply)
            raise NotImplementedException('kdisjointpath-no-services', extra_details=str_extra_details)

        if len(pathcomp_reply.services) > 1:
            MSG = 'KDisjointPath reported subservices for Service({:s}): {:s}'
            str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids)
            str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply)
            str_extra_details = MSG.format(str_service_id, str_pathcomp_reply)
            raise NotImplementedException('kdisjointpath-subservices', extra_details=str_extra_details)

        if len(pathcomp_reply.connections) == 0:
            MSG = 'KDisjointPath reported no connections for Service({:s}): {:s}'
            str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids)
            str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply)
            str_extra_details = MSG.format(str_service_id, str_pathcomp_reply)
            raise NotImplementedException('kdisjointpath-no-connections', extra_details=str_extra_details)

        # compute a string representing the old connection; used to discard
        # candidates whose hop sequence equals the current path
        str_old_connection = connection_to_string(old_connection)

        LOGGER.debug('old_connection={:s}'.format(grpc_message_to_json_string(old_connection)))

        candidate_new_connections = list()
        for candidate_new_connection in pathcomp_reply.connections:
            str_candidate_new_connection = connection_to_string(candidate_new_connection)
            if str_candidate_new_connection == str_old_connection: continue
            candidate_new_connections.append(candidate_new_connection)

        if len(candidate_new_connections) == 0:
            MSG = 'Unable to find a new suitable path: pathcomp_request={:s} pathcomp_reply={:s} old_connection={:s}'
            str_pathcomp_request = grpc_message_to_json_string(pathcomp_request)
            str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply)
            str_old_connection = grpc_message_to_json_string(old_connection)
            extra_details = MSG.format(str_pathcomp_request, str_pathcomp_reply, str_old_connection)
            raise OperationFailedException('no-new-path-found', extra_details=extra_details)

        str_candidate_new_connections = [
            grpc_message_to_json_string(candidate_new_connection)
            for candidate_new_connection in candidate_new_connections
        ]
        LOGGER.debug('candidate_new_connections={:s}'.format(str(str_candidate_new_connections)))

        # Candidates are equally valid alternatives; pick one at random.
        new_connection = random.choice(candidate_new_connections)
        LOGGER.debug('new_connection={:s}'.format(grpc_message_to_json_string(new_connection)))

        # Change UUID of new connection to prevent collisions
        tmp_connection = Connection()
        tmp_connection.CopyFrom(new_connection)
        tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4())
        new_connection = tmp_connection

        # Feed TaskScheduler with the service to update, the old connection to
        # deconfigure and the new connection to configure. It will produce a
        # schedule of tasks (an ordered list of tasks to be executed) to
        # implement the requested changes.
        tasks_scheduler = TasksScheduler(self.service_handler_factory)
        tasks_scheduler.compose_service_connection_update(
            updated_service_with_uuids, old_connection, new_connection)
        tasks_scheduler.execute_all()

        return Empty()
-import json, logging +import logging #, json from enum import Enum from typing import TYPE_CHECKING, Any, Dict, Optional, Union from common.method_wrappers.ServiceExceptions import NotFoundException @@ -20,7 +20,7 @@ from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceDri from common.tools.context_queries.Connection import get_connection_by_id from common.tools.context_queries.Device import get_device from common.tools.context_queries.Service import get_service_by_id -from common.tools.grpc.Tools import grpc_message_list_to_json_string +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Device import json_device_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient @@ -113,16 +113,18 @@ class TaskExecutor: self._store_grpc_object(CacheableObjectType.DEVICE, device_key, device) def get_device_controller(self, device : Device) -> Optional[Device]: - json_controller = None - for config_rule in device.device_config.config_rules: - if config_rule.WhichOneof('config_rule') != 'custom': continue - if config_rule.custom.resource_key != '_controller': continue - json_controller = json.loads(config_rule.custom.resource_value) - break - - if json_controller is None: return None - - controller_uuid = json_controller['uuid'] + #json_controller = None + #for config_rule in device.device_config.config_rules: + # if config_rule.WhichOneof('config_rule') != 'custom': continue + # if config_rule.custom.resource_key != '_controller': continue + # json_controller = json.loads(config_rule.custom.resource_value) + # break + + #if json_controller is None: return None + + #controller_uuid = json_controller['uuid'] + controller_uuid = device.controller_id.device_uuid.uuid + if len(controller_uuid) == 0: return None controller = self.get_device(DeviceId(**json_device_id(controller_uuid))) controller_uuid = controller.device_id.device_uuid.uuid if controller is 
    def compose_service_connection_update(
        self, service : Service, old_connection : Connection, new_connection : Connection
    ) -> None:
        """Build the task DAG that swaps a service's connection.

        Schedules: set service UPDATING -> deconfigure old connection ->
        configure new connection -> set service ACTIVE, wiring dependencies so
        the topological order honors that sequence.

        Args:
            service: Service being updated (must already exist in Context).
            old_connection: Currently-active connection to tear down.
            new_connection: Replacement connection to bring up.
        """
        t0 = time.time()

        # Pre-load the executor cache so tasks can resolve these objects.
        self._add_service_to_executor_cache(service)
        self._add_connection_to_executor_cache(old_connection)
        self._add_connection_to_executor_cache(new_connection)

        service_updating_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
            self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING))

        old_connection_deconfigure_key = self._add_task_if_not_exists(Task_ConnectionDeconfigure(
            self._executor, old_connection.connection_id))

        new_connection_configure_key = self._add_task_if_not_exists(Task_ConnectionConfigure(
            self._executor, new_connection.connection_id))

        service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus(
            self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE))

        # the old connection deconfiguration depends on service being in updating state
        self._dag.add(old_connection_deconfigure_key, service_updating_key)

        # the new connection configuration depends on service being in updating state
        self._dag.add(new_connection_configure_key, service_updating_key)

        # the new connection configuration depends on the old connection having been deconfigured
        self._dag.add(new_connection_configure_key, old_connection_deconfigure_key)

        # re-activating the service depends on the service being in updating state before
        self._dag.add(service_active_key, service_updating_key)

        # re-activating the service depends on the new connection having been configured
        self._dag.add(service_active_key, new_connection_configure_key)

        t1 = time.time()
        LOGGER.debug('[compose_service_connection_update] elapsed_time: {:f} sec'.format(t1-t0))

    def execute_all(self, dry_run : bool = False) -> None:
        """Run every scheduled task in topological order.

        Args:
            dry_run: when True, tasks are not executed and are reported as
                succeeded.

        NOTE(review): annotated as returning None but actually returns
        zip(ordered_task_keys, results) — confirm intended signature.
        """
        ordered_task_keys = list(self._dag.static_order())
        LOGGER.debug('[execute_all] ordered_task_keys={:s}'.format(str(ordered_task_keys)))

        results = []
        for task_key in ordered_task_keys:
            str_task_name = ('DRY ' if dry_run else '') + str(task_key)
            LOGGER.debug('[execute_all] starting task {:s}'.format(str_task_name))
            task = self._tasks.get(task_key)
            succeeded = True if dry_run else task.execute()
            results.append(succeeded)
            LOGGER.debug('[execute_all] finished task {:s} ; succeeded={:s}'.format(str_task_name, str(succeeded)))

        LOGGER.debug('[execute_all] results={:s}'.format(str(results)))
        return zip(ordered_task_keys, results)
def connection_to_string(connection : Connection) -> str:
    """Build a canonical textual signature of a connection's path.

    Each path hop is rendered as "<device_uuid>:<endpoint_uuid>" and hops are
    joined with commas, so two connections traversing the same ordered sequence
    of endpoints yield equal strings.
    """
    str_device_endpoint_uuids : List[str] = [
        '{:s}:{:s}'.format(endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
        for endpoint_id in connection.path_hops_endpoint_ids
    ]
    return ','.join(str_device_endpoint_uuids)
"uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + 
{"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", 
"resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R6"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "1/4"}, + {"sample_types": [], "type": "copper", "uuid": "1/5"}, + {"sample_types": [], "type": "copper", "uuid": "1/6"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R7"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": 
{"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper", "uuid": "1/1"}, + {"sample_types": [], "type": "copper", "uuid": "1/2"}, + {"sample_types": [], "type": "copper", "uuid": "1/3"}, + {"sample_types": [], "type": "copper", "uuid": "2/1"}, + {"sample_types": [], "type": "copper", "uuid": "2/2"}, + {"sample_types": [], "type": "copper", "uuid": "2/3"}, + {"sample_types": [], "type": "copper", "uuid": "2/4"}, + {"sample_types": [], "type": "copper", "uuid": "2/5"}, + {"sample_types": [], "type": "copper", "uuid": "2/6"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "R1==R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1==R6"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R1==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R2==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/2"}}, 
+ {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R3==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R4==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R4==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R6"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R5==R7"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R6==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": 
{"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R6==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R6"}}, "endpoint_uuid": {"uuid": "2/2"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/1"}}, + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/3"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "R7==R5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R7"}}, "endpoint_uuid": {"uuid": "2/5"}}, + {"device_id": {"device_uuid": {"uuid": "R5"}}, "endpoint_uuid": {"uuid": "2/3"}} + ]} + ], + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "test-svc"} + }, + "service_type": 2, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id":{"device_uuid":{"uuid":"R1"}},"endpoint_uuid":{"uuid":"1/1"}}, + {"device_id":{"device_uuid":{"uuid":"R4"}},"endpoint_uuid":{"uuid":"1/1"}} + ], + "service_constraints": [ + {"sla_capacity": {"capacity_gbps": 10.0}}, + {"sla_latency": {"e2e_latency_ms": 15.2}} + ], + "service_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": { + "address_families": ["IPV4"], "bgp_as": 65000, "bgp_route_target": "65000:123", + "mtu": 1512, "vlan_id": 111 + }}}, + {"action": 1, "custom": {"resource_key": "/device[R1]/endpoint[1/1]/settings", "resource_value": { + "sub_interface_index": 0, "vlan_id": 111 + }}}, + {"action": 1, "custom": {"resource_key": 
"/device[R4]/endpoint[1/1]/settings", "resource_value": { + "sub_interface_index": 0, "vlan_id": 111 + }}} + ]} + } + ] +} diff --git a/src/service/tests/test_service_recompute_cons.sh b/src/service/tests/test_service_recompute_cons.sh new file mode 100644 index 0000000000000000000000000000000000000000..e5bc18895b2968ba99b7262458ed988e57ee920c --- /dev/null +++ b/src/service/tests/test_service_recompute_cons.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source my_deploy.sh +./deploy/all.sh + +source tfs_runtime_env_vars.sh +PYTHONPATH=./src pytest --log-level=INFO --verbose src/service/tests/test_unitary_recompute_conns.py diff --git a/src/service/tests/test_unitary_recompute_conns.py b/src/service/tests/test_unitary_recompute_conns.py new file mode 100644 index 0000000000000000000000000000000000000000..717e3af73b0d21d1dfeeab1e388c5df663417337 --- /dev/null +++ b/src/service/tests/test_unitary_recompute_conns.py @@ -0,0 +1,120 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# Unitary test for the ServiceService.RecomputeConnections RPC: loads a 7-router
# emulated topology with a single L3 service, triggers two path recomputations,
# and checks the active connection changes each time.

import logging, pytest
from common.Constants import DEFAULT_CONTEXT_NAME
from common.proto.context_pb2 import ContextId, Service
from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
from common.tools.grpc.Tools import grpc_message_to_json_string
from common.tools.object_factory.Context import json_context_id
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from service.client.ServiceClient import ServiceClient

LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)

# Descriptor file with the 7-router topology and the pre-defined test service.
DESCRIPTOR_FILE = 'src/service/tests/descriptors_recompute_conns.json'
ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))

@pytest.fixture(scope='session')
def context_client():
    # Session-scoped gRPC client towards the Context component.
    _client = ContextClient()
    yield _client
    _client.close()

@pytest.fixture(scope='session')
def device_client():
    # Session-scoped gRPC client towards the Device component.
    _client = DeviceClient()
    yield _client
    _client.close()

@pytest.fixture(scope='session')
def service_client():
    # Session-scoped gRPC client towards the Service component.
    _client = ServiceClient()
    yield _client
    _client.close()


def test_service_recompute_connection(
    context_client : ContextClient,         # pylint: disable=redefined-outer-name
    device_client : DeviceClient,           # pylint: disable=redefined-outer-name
    service_client : ServiceClient,         # pylint: disable=redefined-outer-name
) -> None:
    """Recompute the service's connection twice and verify it changes each time."""

    # ===== Setup scenario =============================================================================================
    validate_empty_scenario(context_client)

    # Load descriptors and validate the base scenario
    descriptor_loader = DescriptorLoader(
        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client,
        service_client=service_client)
    results = descriptor_loader.process()
    check_descriptor_load_results(results, descriptor_loader)
    descriptor_loader.validate()


    # ===== Recompute Connection =======================================================================================
    response = context_client.ListServices(ADMIN_CONTEXT_ID)
    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
    assert len(response.services) == 1
    service = response.services[0]
    service_id = service.service_id

    response = context_client.ListConnections(service_id)
    LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
        grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
    assert len(response.connections) == 1 # 1 connection per service
    str_old_connections = grpc_message_to_json_string(response)

    # Change path first time
    # RecomputeConnections requires empty endpoints/constraints/config rules.
    request = Service()
    request.CopyFrom(service)
    del request.service_endpoint_ids[:]         # pylint: disable=no-member
    del request.service_constraints[:]          # pylint: disable=no-member
    del request.service_config.config_rules[:]  # pylint: disable=no-member
    service_client.RecomputeConnections(request)

    response = context_client.ListConnections(service_id)
    LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
        grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
    assert len(response.connections) == 1 # 1 connection per service
    str_new_connections = grpc_message_to_json_string(response)

    # The recomputed connection must differ from the previous one.
    assert str_old_connections != str_new_connections

    str_old_connections = str_new_connections

    # Change path second time
    request = Service()
    request.CopyFrom(service)
    del request.service_endpoint_ids[:]         # pylint: disable=no-member
    del request.service_constraints[:]          # pylint: disable=no-member
    del request.service_config.config_rules[:]  # pylint: disable=no-member
    service_client.RecomputeConnections(request)

    response = context_client.ListConnections(service_id)
    LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
        grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
    assert len(response.connections) == 1 # 1 connection per service
    str_new_connections = grpc_message_to_json_string(response)

    assert str_old_connections != str_new_connections


    # ===== Cleanup scenario ===========================================================================================
    # Validate and unload the base scenario
    descriptor_loader.validate()
    descriptor_loader.unload()
    validate_empty_scenario(context_client)
dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br> <b>Drivers: </b> <ul>