From dc934c3d5da59353158af3313cfe3e415d5d7bc1 Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Tue, 6 Sep 2022 15:10:16 +0200 Subject: [PATCH 001/158] Context model updated to SQLAlchemy --- src/common/Constants.py | 4 +- src/context/requirements.in | 3 + src/context/service/Database.py | 25 ++++ src/context/service/__main__.py | 26 +++- src/context/service/database/Base.py | 2 + src/context/service/database/ContextModel.py | 24 ++-- .../service/grpc_server/ContextService.py | 9 +- .../grpc_server/ContextServiceServicerImpl.py | 121 ++++++++++-------- src/context/tests/test_unitary.py | 110 ++++++++++------ 9 files changed, 214 insertions(+), 110 deletions(-) create mode 100644 src/context/service/Database.py create mode 100644 src/context/service/database/Base.py diff --git a/src/common/Constants.py b/src/common/Constants.py index f18d43840..03f34a410 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -30,8 +30,8 @@ DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0' DEFAULT_METRICS_PORT = 9192 # Default context and topology UUIDs -DEFAULT_CONTEXT_UUID = 'admin' -DEFAULT_TOPOLOGY_UUID = 'admin' +DEFAULT_CONTEXT_UUID = '85f78267-4c5e-4f80-ad2f-7fbaca7c62a0' +DEFAULT_TOPOLOGY_UUID = '85f78267-4c5e-4f80-ad2f-7fbaca7c62a0' # Default service names class ServiceNameEnum(Enum): diff --git a/src/context/requirements.in b/src/context/requirements.in index 9cc7e71f2..6e07456fc 100644 --- a/src/context/requirements.in +++ b/src/context/requirements.in @@ -2,3 +2,6 @@ Flask==2.1.3 Flask-RESTful==0.3.9 redis==4.1.2 requests==2.27.1 +sqlalchemy==1.4.40 +sqlalchemy-cockroachdb +psycopg2-binary diff --git a/src/context/service/Database.py b/src/context/service/Database.py new file mode 100644 index 000000000..e25e2319c --- /dev/null +++ b/src/context/service/Database.py @@ -0,0 +1,25 @@ +from sqlalchemy.orm import Session +from context.service.database.Base import Base +import logging + +LOGGER = logging.getLogger(__name__) + + +class Database(Session): + def __init__(self, session): + super().__init__() + self.session = session + + def query_all(self, model): + result = [] + with self.session() as session: + for entry in session.query(model).all(): + result.append(entry) + + return result + + def clear(self): + with self.session() as session: + engine = session.get_bind() + Base.metadata.drop_all(engine) + Base.metadata.create_all(engine) diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 53754caf4..154c8ff00 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -15,15 +15,18 @@ import logging, signal, sys, threading from prometheus_client import start_http_server from common.Settings import get_log_level, get_metrics_port, get_setting -from common.orm.Database import Database -from common.orm.Factory import get_database_backend from common.message_broker.Factory import get_messagebroker_backend from common.message_broker.MessageBroker import MessageBroker from context.Config import POPULATE_FAKE_DATA +from sqlalchemy.orm import sessionmaker, declarative_base +from context.service.database.Base import Base from .grpc_server.ContextService import ContextService from .rest_server.Resources import RESOURCES from .rest_server.RestServer import RestServer from .Populate import populate +# from models import Device, EndPoint, EndPointId, DeviceDriverEnum, DeviceOperationalStatusEnum, ConfigActionEnum, \ +# ConfigRule, KpiSampleType, Base +from sqlalchemy import create_engine terminate = threading.Event() LOGGER = None @@ -49,18 +52,31 @@ def main(): start_http_server(metrics_port) # Get database instance - database = Database(get_database_backend()) + db_uri = 'cockroachdb://root@10.152.183.121:26257/defaultdb?sslmode=disable' + LOGGER.debug('Connecting to DB: {}'.format(db_uri)) + + # engine = create_engine(db_uri, echo=False) + + try: + engine = create_engine(db_uri) + except Exception as e: + LOGGER.error("Failed to connect to database.") + LOGGER.error(f"{e}") + return 1 + + Base.metadata.create_all(engine) + session = sessionmaker(bind=engine) # Get message broker instance messagebroker = MessageBroker(get_messagebroker_backend()) # Starting context service - grpc_service = ContextService(database, messagebroker) + grpc_service = ContextService(session, messagebroker) grpc_service.start() rest_server = RestServer() for endpoint_name, resource_class, resource_url in RESOURCES: - rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) + rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(session,)) rest_server.start() populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA) diff --git a/src/context/service/database/Base.py b/src/context/service/database/Base.py new file mode 100644 index 000000000..c64447da1 --- /dev/null +++ b/src/context/service/database/Base.py @@ -0,0 +1,2 @@ +from sqlalchemy.ext.declarative import declarative_base +Base = declarative_base() diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index a12e6669d..ba55fd566 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -14,19 +14,23 @@ import logging from typing import Dict, List -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model +from sqlalchemy import Column +from sqlalchemy.dialects.postgresql import UUID +from context.service.database.Base import Base + LOGGER = logging.getLogger(__name__) -class ContextModel(Model): - pk = PrimaryKeyField() - context_uuid = StringField(required=True, allow_empty=False) + +class ContextModel(Base): + __tablename__ = 'Context' + + context_uuid = Column(UUID(as_uuid=False), primary_key=True) def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} + """ def dump_service_ids(self) -> List[Dict]: from .ServiceModel import ServiceModel # pylint: disable=import-outside-toplevel db_service_pks = self.references(ServiceModel) @@ -36,9 +40,11 @@ class ContextModel(Model): from .TopologyModel import TopologyModel # pylint: disable=import-outside-toplevel db_topology_pks = self.references(TopologyModel) return [TopologyModel(self.database, pk).dump_id() for pk,_ in db_topology_pks] + """ - def dump(self, include_services=True, include_topologies=True) -> Dict: # pylint: disable=arguments-differ + def dump(self, include_services=True, include_topologies=True) -> Dict: # pylint: disable=arguments-differ result = {'context_id': self.dump_id()} - if include_services: result['service_ids'] = self.dump_service_ids() - if include_topologies: result['topology_ids'] = self.dump_topology_ids() + # if include_services: result['service_ids'] = self.dump_service_ids() + # if include_topologies: result['topology_ids'] = self.dump_topology_ids() return result + diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/grpc_server/ContextService.py index 1b54ec540..d029b54e0 100644 --- a/src/context/service/grpc_server/ContextService.py +++ b/src/context/service/grpc_server/ContextService.py @@ -15,19 +15,22 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc from common.message_broker.MessageBroker import MessageBroker -from common.orm.Database import Database from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server from common.tools.service.GenericGrpcService import GenericGrpcService +from sqlalchemy.orm import Session +import logging + from .ContextServiceServicerImpl import ContextServiceServicerImpl # Custom gRPC settings GRPC_MAX_WORKERS = 200 # multiple clients might keep connections alive for Get*Events() RPC methods +LOGGER = logging.getLogger(__name__) class ContextService(GenericGrpcService): - def __init__(self, database : Database, messagebroker : MessageBroker, cls_name: str = __name__) -> None: + def __init__(self, session : Session, messagebroker : MessageBroker, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.CONTEXT) super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name) - self.context_servicer = ContextServiceServicerImpl(database, messagebroker) + self.context_servicer = ContextServiceServicerImpl(session, messagebroker) def install_servicers(self): add_ContextServiceServicer_to_server(self.context_servicer, self.server) diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 4c8f957ec..36f79a15c 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -31,10 +31,13 @@ from common.proto.context_pb2 import ( from common.proto.context_pb2_grpc import ContextServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException +from sqlalchemy.orm import Session +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException + +""" from context.service.database.ConfigModel import grpc_config_rules_to_raw, update_config from context.service.database.ConnectionModel import ConnectionModel, set_path from context.service.database.ConstraintModel import set_constraints -from context.service.database.ContextModel import ContextModel from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types from context.service.database.Events import notify_event @@ -46,6 +49,11 @@ from context.service.database.ServiceModel import ( ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status from context.service.database.TopologyModel import TopologyModel +""" +from context.service.database.ContextModel import ContextModel +# from context.service.database.TopologyModel import TopologyModel +from context.service.database.Events import notify_event + from .Constants import ( CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY) @@ -65,10 +73,10 @@ METHOD_NAMES = [ METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) class ContextServiceServicerImpl(ContextServiceServicer): - def __init__(self, database : Database, messagebroker : MessageBroker): + def __init__(self, session : Session, messagebroker : MessageBroker): LOGGER.debug('Creating Servicer...') self.lock = threading.Lock() - self.database = database + self.session = session self.messagebroker = messagebroker LOGGER.debug('Servicer Created') @@ -77,77 +85,83 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList: - with self.lock: - db_contexts : List[ContextModel] = get_all_objects(self.database, ContextModel) - db_contexts = sorted(db_contexts, key=operator.attrgetter('pk')) - return ContextIdList(context_ids=[db_context.dump_id() for db_context in db_contexts]) + with self.session() as session: + result = session.query(ContextModel).all() + + return ContextIdList(context_ids=[row.dump_id() for row in result]) + @safe_and_metered_rpc_method(METRICS, LOGGER) def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: - with self.lock: - db_contexts : List[ContextModel] = get_all_objects(self.database, ContextModel) - db_contexts = sorted(db_contexts, key=operator.attrgetter('pk')) - return ContextList(contexts=[db_context.dump() for db_context in db_contexts]) + with self.session() as session: + result = session.query(ContextModel).all() + + return ContextList(contexts=[row.dump() for row in result]) + @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: - with self.lock: - context_uuid = request.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - return Context(**db_context.dump(include_services=True, include_topologies=True)) + context_uuid = request.context_uuid.uuid + with self.session() as session: + result = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() + + if not result: + raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + + return Context(**result.dump()) @safe_and_metered_rpc_method(METRICS, LOGGER) def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: - with self.lock: - context_uuid = request.context_id.context_uuid.uuid + context_uuid = request.context_id.context_uuid.uuid - for i,topology_id in enumerate(request.topology_ids): - topology_context_uuid = topology_id.context_id.context_uuid.uuid - if topology_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + for i, topology_id in enumerate(request.topology_ids): + topology_context_uuid = topology_id.context_id.context_uuid.uuid + if topology_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) - for i,service_id in enumerate(request.service_ids): - service_context_uuid = service_id.context_id.context_uuid.uuid - if service_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + for i, service_id in enumerate(request.service_ids): + service_context_uuid = service_id.context_id.context_uuid.uuid + if service_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) - result : Tuple[ContextModel, bool] = update_or_create_object( - self.database, ContextModel, context_uuid, {'context_uuid': context_uuid}) - db_context, updated = result + context_add = ContextModel(context_uuid=context_uuid) - for i,topology_id in enumerate(request.topology_ids): - topology_context_uuid = topology_id.context_id.context_uuid.uuid - topology_uuid = topology_id.topology_uuid.uuid - get_object(self.database, TopologyModel, [context_uuid, topology_uuid]) # just to confirm it exists + updated = True + with self.session() as session: + result = session.query(ContextModel).filter_by(context_uuid=context_uuid).all() + if not result: + updated = False - for i,service_id in enumerate(request.service_ids): - service_context_uuid = service_id.context_id.context_uuid.uuid - service_uuid = service_id.service_uuid.uuid - get_object(self.database, ServiceModel, [context_uuid, service_uuid]) # just to confirm it exists + with self.session() as session: + session.merge(context_add) + session.commit() + + + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_context_id = context_add.dump_id() + notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id}) + return ContextId(**context_add.dump_id()) - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_context_id = db_context.dump_id() - notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id}) - return ContextId(**dict_context_id) @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: - with self.lock: - context_uuid = request.context_uuid.uuid - db_context = ContextModel(self.database, context_uuid, auto_load=False) - found = db_context.load() - if not found: return Empty() - - dict_context_id = db_context.dump_id() - db_context.delete() + context_uuid = request.context_uuid.uuid + + with self.session() as session: + result = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() + if not result: + return Empty() + session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() + session.commit() event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id}) + notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': result.dump_id()}) return Empty() + """ @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): @@ -761,3 +775,4 @@ class ContextServiceServicerImpl(ContextServiceServicer): def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): yield ConnectionEvent(**json.loads(message.content)) + """ \ No newline at end of file diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index b46c9468c..0879dcb06 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -19,7 +19,7 @@ from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, Servic from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, get_service_baseurl_http, get_service_port_grpc, get_service_port_http) -from common.orm.Database import Database +from context.service.Database import Database from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum from common.message_broker.MessageBroker import MessageBroker @@ -40,6 +40,12 @@ from context.service.grpc_server.ContextService import ContextService from context.service.Populate import populate from context.service.rest_server.RestServer import RestServer from context.service.rest_server.Resources import RESOURCES +from requests import Session +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker +from context.service.database.ContextModel import ContextModel +from context.service.database.Base import Base + from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2, @@ -50,8 +56,8 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) LOCAL_HOST = '127.0.0.1' -GRPC_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.CONTEXT) # avoid privileged ports -HTTP_PORT = 10000 + get_service_port_http(ServiceNameEnum.CONTEXT) # avoid privileged ports +GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports +HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT)) # avoid privileged ports os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) @@ -68,12 +74,10 @@ REDIS_CONFIG = { } SCENARIOS = [ - ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ), - ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), + ('all_sqlalchemy', {}, MessageBrokerBackendEnum.INMEMORY, {} ), ] - @pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) -def context_db_mb(request) -> Tuple[Database, MessageBroker]: +def context_db_mb(request) -> Tuple[Session, MessageBroker]: name,db_backend,db_settings,mb_backend,mb_settings = request.param msg = 'Running scenario {:s} db_backend={:s}, db_settings={:s}, mb_backend={:s}, mb_settings={:s}...' LOGGER.info(msg.format(str(name), str(db_backend.value), str(db_settings), str(mb_backend.value), str(mb_settings))) @@ -82,13 +86,36 @@ def context_db_mb(request) -> Tuple[Database, MessageBroker]: yield _database, _message_broker _message_broker.terminate() +@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) +def context_s_mb(request) -> Tuple[Session, MessageBroker]: + name,db_session,mb_backend,mb_settings = request.param + msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' + LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) + + db_uri = 'cockroachdb://root@10.152.183.121:26257/defaultdb?sslmode=disable' + LOGGER.debug('Connecting to DB: {}'.format(db_uri)) + + try: + engine = create_engine(db_uri) + except Exception as e: + LOGGER.error("Failed to connect to database.") + LOGGER.error(f"{e}") + return 1 + + Base.metadata.create_all(engine) + _session = sessionmaker(bind=engine) + + _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings)) + yield _session, _message_broker + _message_broker.terminate() + @pytest.fixture(scope='session') -def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - _service = ContextService(context_db_mb[0], context_db_mb[1]) +def context_service_grpc(context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + _service = ContextService(context_s_mb[0], context_s_mb[1]) _service.start() yield _service _service.stop() - +""" @pytest.fixture(scope='session') def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name database = context_db_mb[0] @@ -100,13 +127,13 @@ def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pyli yield _rest_server _rest_server.shutdown() _rest_server.join() - +""" @pytest.fixture(scope='session') def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name _client = ContextClient() yield _client _client.close() - +""" def do_rest_request(url : str): base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) @@ -115,18 +142,18 @@ def do_rest_request(url : str): LOGGER.warning('Reply: {:s}'.format(str(reply.text))) assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) return reply.json() - +""" # ----- Test gRPC methods ---------------------------------------------------------------------------------------------- - def test_grpc_context( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] + context_s_mb : Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_s_mb[0] - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() + database = Database(Session) + # ----- Clean the database ----------------------------------------------------------------------------------------- + database.clear() # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector(context_client_grpc) events_collector.start() @@ -145,7 +172,7 @@ def test_grpc_context( assert len(response.contexts) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.query_all(ContextModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover @@ -156,51 +183,56 @@ def test_grpc_context( response = context_client_grpc.SetContext(Context(**CONTEXT)) assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + wrong_uuid = 'c97c4185-e1d1-4ea7-b6b9-afbf76cb61f4' with pytest.raises(grpc.RpcError) as e: WRONG_TOPOLOGY_ID = copy.deepcopy(TOPOLOGY_ID) - WRONG_TOPOLOGY_ID['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' + WRONG_TOPOLOGY_ID['context_id']['context_uuid']['uuid'] = wrong_uuid WRONG_CONTEXT = copy.deepcopy(CONTEXT) WRONG_CONTEXT['topology_ids'].append(WRONG_TOPOLOGY_ID) context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.topology_ids[0].context_id.context_uuid.uuid(wrong-context-uuid) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid(admin)' + msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) assert e.value.details() == msg with pytest.raises(grpc.RpcError) as e: WRONG_SERVICE_ID = copy.deepcopy(SERVICE_R1_R2_ID) - WRONG_SERVICE_ID['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' + WRONG_SERVICE_ID['context_id']['context_uuid']['uuid'] = wrong_uuid WRONG_CONTEXT = copy.deepcopy(CONTEXT) WRONG_CONTEXT['service_ids'].append(WRONG_SERVICE_ID) context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_ids[0].context_id.context_uuid.uuid(wrong-context-uuid) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid(admin)' + msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) assert e.value.details() == msg # ----- Check create event ----------------------------------------------------------------------------------------- + """ event = events_collector.get_event(block=True) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - + """ # ----- Update the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetContext(Context(**CONTEXT)) assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Check update event ----------------------------------------------------------------------------------------- + """ event = events_collector.get_event(block=True) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + """ # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.query_all(ContextModel) + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 + assert len(db_entries) == 1 # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) @@ -223,22 +255,23 @@ def test_grpc_context( context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, ContextEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() + db_entries = database.query_all(ContextModel) + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 - + """ def test_grpc_topology( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name @@ -1293,3 +1326,4 @@ def test_tools_fast_string_hasher(): fast_hasher(('hello', 'world')) fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')]) fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8'))) +""" \ No newline at end of file -- GitLab From 1a9c0447ddc647e5c8dea16f9c3ec3577a2c7f81 Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Tue, 13 Sep 2022 14:58:32 +0200 Subject: [PATCH 002/158] Topology model updated to SQLAlchemy --- src/context/service/Database.py | 3 + src/context/service/database/ContextModel.py | 5 +- src/context/service/database/TopologyModel.py | 26 ++-- .../grpc_server/ContextServiceServicerImpl.py | 130 +++++++++--------- src/context/tests/test_unitary.py | 106 +++++++------- 5 files changed, 136 insertions(+), 134 deletions(-) diff --git a/src/context/service/Database.py b/src/context/service/Database.py index e25e2319c..281761ed8 100644 --- a/src/context/service/Database.py +++ b/src/context/service/Database.py @@ -18,6 +18,9 @@ class Database(Session): return result + def get_object(self): + pass + def clear(self): with self.session() as session: engine = session.get_bind() diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index ba55fd566..77a95ea03 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -17,6 +17,7 @@ from typing import Dict, List from sqlalchemy import Column from sqlalchemy.dialects.postgresql import UUID from context.service.database.Base import Base +from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) @@ -24,9 +25,11 @@ LOGGER = logging.getLogger(__name__) class ContextModel(Base): __tablename__ = 'Context' - context_uuid = Column(UUID(as_uuid=False), primary_key=True) + # Relationships + topology = relationship("TopologyModel", back_populates="context") + def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py index 5909c7a2c..9f117c73c 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/TopologyModel.py @@ -19,23 +19,28 @@ from common.orm.fields.PrimaryKeyField import PrimaryKeyField from common.orm.fields.StringField import StringField from common.orm.model.Model import Model from common.orm.HighLevel import get_related_objects -from .ContextModel import ContextModel - +from sqlalchemy.orm import relationship +from sqlalchemy import Column, ForeignKey +from sqlalchemy.dialects.postgresql import UUID +from context.service.database.Base import Base LOGGER = logging.getLogger(__name__) -class TopologyModel(Model): - pk = PrimaryKeyField() - context_fk = ForeignKeyField(ContextModel) - topology_uuid = StringField(required=True, allow_empty=False) +class TopologyModel(Base): + __tablename__ = 'Topology' + context_fk = Column(UUID(as_uuid=False), ForeignKey("Context.context_uuid"), nullable=False) + topology_uuid = Column(UUID(as_uuid=False), primary_key=True, nullable=False) + + # Relationships + context = relationship("ContextModel", back_populates="topology", lazy="joined") def dump_id(self) -> Dict: - context_id = ContextModel(self.database, self.context_fk).dump_id() + context_id = self.context.dump_id() return { 'context_id': context_id, 'topology_uuid': {'uuid': self.topology_uuid}, } - def dump_device_ids(self) -> List[Dict]: + """def dump_device_ids(self) -> List[Dict]: from .RelationModels import TopologyDeviceModel # pylint: disable=import-outside-toplevel db_devices = get_related_objects(self, TopologyDeviceModel, 'device_fk') return [db_device.dump_id() for db_device in sorted(db_devices, key=operator.attrgetter('pk'))] @@ -44,11 +49,12 @@ class TopologyModel(Model): from .RelationModels import TopologyLinkModel # pylint: disable=import-outside-toplevel db_links = get_related_objects(self, TopologyLinkModel, 'link_fk') return [db_link.dump_id() for db_link in sorted(db_links, key=operator.attrgetter('pk'))] + """ def dump( # pylint: disable=arguments-differ self, include_devices=True, include_links=True ) -> Dict: result = {'topology_id': self.dump_id()} - if include_devices: result['device_ids'] = self.dump_device_ids() - if include_links: result['link_ids'] = self.dump_link_ids() + # if include_devices: result['device_ids'] = self.dump_device_ids() + # if include_links: result['link_ids'] = self.dump_link_ids() return result diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 36f79a15c..bf51bf316 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -15,10 +15,8 @@ import grpc, json, logging, operator, threading from typing import Iterator, List, Set, Tuple from common.message_broker.MessageBroker import MessageBroker -from common.orm.Database import Database -from common.orm.HighLevel import ( - get_all_objects, get_object, get_or_create_object, get_related_objects, update_or_create_object) -from common.orm.backend.Tools import key_to_str +from context.service.Database import Database + from common.proto.context_pb2 import ( Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, Context, ContextEvent, ContextId, ContextIdList, ContextList, @@ -31,9 +29,10 @@ from common.proto.context_pb2 import ( from common.proto.context_pb2_grpc import ContextServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException -from sqlalchemy.orm import Session +from sqlalchemy.orm import Session, contains_eager, selectinload from common.rpc_method_wrapper.ServiceExceptions import NotFoundException + """ from context.service.database.ConfigModel import grpc_config_rules_to_raw, update_config from context.service.database.ConnectionModel import ConnectionModel, set_path @@ -51,6 +50,7 @@ from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_ from context.service.database.TopologyModel import TopologyModel """ from context.service.database.ContextModel import ContextModel +from context.service.database.TopologyModel import TopologyModel # from context.service.database.TopologyModel import TopologyModel from context.service.database.Events import notify_event @@ -77,6 +77,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): LOGGER.debug('Creating Servicer...') self.lock = threading.Lock() self.session = session + self.database = Database(session) self.messagebroker = messagebroker LOGGER.debug('Servicer Created') @@ -133,10 +134,8 @@ class ContextServiceServicerImpl(ContextServiceServicer): updated = True with self.session() as session: result = session.query(ContextModel).filter_by(context_uuid=context_uuid).all() - if not result: - updated = False - - with self.session() as session: + if not result: + updated = False session.merge(context_add) session.commit() @@ -161,7 +160,6 @@ class ContextServiceServicerImpl(ContextServiceServicer): notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': result.dump_id()}) return Empty() - """ @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): @@ -174,75 +172,78 @@ class ContextServiceServicerImpl(ContextServiceServicer): def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: with self.lock: context_uuid = request.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - db_topologies : Set[TopologyModel] = get_related_objects(db_context, TopologyModel) - db_topologies = sorted(db_topologies, key=operator.attrgetter('pk')) + + with self.session() as session: + result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + if not result: + raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + + db_topologies = result.topology return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies]) @safe_and_metered_rpc_method(METRICS, LOGGER) def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: - with self.lock: - context_uuid = request.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - db_topologies : Set[TopologyModel] = get_related_objects(db_context, TopologyModel) - db_topologies = sorted(db_topologies, key=operator.attrgetter('pk')) - return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) + context_uuid = request.context_uuid.uuid + + with self.session() as session: + result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by( + context_uuid=context_uuid).one_or_none() + if not result: + raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + + db_topologies = result.topology + return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: - with self.lock: - str_key = key_to_str([request.context_id.context_uuid.uuid, request.topology_uuid.uuid]) - db_topology : TopologyModel = get_object(self.database, TopologyModel, str_key) - return Topology(**db_topology.dump(include_devices=True, include_links=True)) + def GetTopology(self, request: TopologyId, contextt : grpc.ServicerContext) -> Topology: + context_uuid = request.context_id.context_uuid.uuid + topology_uuid = request.topology_uuid.uuid + + with self.session() as session: + result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).options(contains_eager(TopologyModel.context)).one_or_none() + + if not result: + raise NotFoundException(TopologyModel.__name__.replace('Model', ''), topology_uuid) + + return Topology(**result.dump()) + @safe_and_metered_rpc_method(METRICS, LOGGER) def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId: - with self.lock: - context_uuid = request.topology_id.context_id.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) + context_uuid = request.topology_id.context_id.context_uuid.uuid + topology_uuid = request.topology_id.topology_uuid.uuid + with self.session() as session: + db_context: ContextModel = session.query(ContextModel).filter_by(context_uuid=context_uuid).one() - topology_uuid = request.topology_id.topology_uuid.uuid - str_topology_key = key_to_str([context_uuid, topology_uuid]) - result : Tuple[TopologyModel, bool] = update_or_create_object( - self.database, TopologyModel, str_topology_key, { - 'context_fk': db_context, 'topology_uuid': topology_uuid}) - db_topology,updated = result - - for device_id in request.device_ids: - device_uuid = device_id.device_uuid.uuid - db_device = get_object(self.database, DeviceModel, device_uuid) - str_topology_device_key = key_to_str([str_topology_key, device_uuid], separator='--') - result : Tuple[TopologyDeviceModel, bool] = update_or_create_object( - self.database, TopologyDeviceModel, str_topology_device_key, - {'topology_fk': db_topology, 'device_fk': db_device}) - #db_topology_device,topology_device_updated = result - - for link_id in request.link_ids: - link_uuid = link_id.link_uuid.uuid - db_link = get_object(self.database, LinkModel, link_uuid) - - str_topology_link_key = key_to_str([str_topology_key, link_uuid], separator='--') - result : Tuple[TopologyLinkModel, bool] = update_or_create_object( - self.database, TopologyLinkModel, str_topology_link_key, - {'topology_fk': db_topology, 'link_fk': db_link}) - #db_topology_link,topology_link_updated = result + topology_add = TopologyModel(topology_uuid=topology_uuid, context_fk=context_uuid) + topology_add.context = db_context + updated = True + with self.session() as session: + result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).options(contains_eager(TopologyModel.context)).one_or_none() - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_topology_id = db_topology.dump_id() - notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) - return TopologyId(**dict_topology_id) + if not result: + updated = False + session.merge(topology_add) + session.commit() + + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_topology_id = topology_add.dump_id() + notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) + return TopologyId(**dict_topology_id) @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: - with self.lock: - context_uuid = request.context_id.context_uuid.uuid - topology_uuid = request.topology_uuid.uuid - db_topology = TopologyModel(self.database, key_to_str([context_uuid, topology_uuid]), auto_load=False) - found = db_topology.load() - if not found: return Empty() + context_uuid = request.context_id.context_uuid.uuid + topology_uuid = request.topology_uuid.uuid - dict_topology_id = db_topology.dump_id() - db_topology.delete() + with self.session() as session: + result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_fk=context_uuid).one_or_none() + if not result: + return Empty() + dict_topology_id = result.dump_id() + + session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_fk=context_uuid).delete() + session.commit() event_type = EventTypeEnum.EVENTTYPE_REMOVE notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) return Empty() @@ -251,6 +252,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): yield TopologyEvent(**json.loads(message.content)) + """ # ----- Device ----------------------------------------------------------------------------------------------------- diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index 0879dcb06..b7a9cee92 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -44,6 +44,7 @@ from requests import Session from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from context.service.database.ContextModel import ContextModel +from context.service.database.TopologyModel import TopologyModel from context.service.database.Base import Base from .Objects import ( @@ -76,15 +77,6 @@ REDIS_CONFIG = { SCENARIOS = [ ('all_sqlalchemy', {}, MessageBrokerBackendEnum.INMEMORY, {} ), ] -@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) -def context_db_mb(request) -> Tuple[Session, MessageBroker]: - name,db_backend,db_settings,mb_backend,mb_settings = request.param - msg = 'Running scenario {:s} db_backend={:s}, db_settings={:s}, mb_backend={:s}, mb_settings={:s}...' - LOGGER.info(msg.format(str(name), str(db_backend.value), str(db_settings), str(mb_backend.value), str(mb_settings))) - _database = Database(get_database_backend(backend=db_backend, **db_settings)) - _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings)) - yield _database, _message_broker - _message_broker.terminate() @pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) def context_s_mb(request) -> Tuple[Session, MessageBroker]: @@ -207,23 +199,19 @@ def test_grpc_context( assert e.value.details() == msg # ----- Check create event ----------------------------------------------------------------------------------------- - """ event = events_collector.get_event(block=True) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - """ # ----- Update the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetContext(Context(**CONTEXT)) assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - """ event = events_collector.get_event(block=True) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - """ # ----- Dump state of database after create/update the object ------------------------------------------------------ db_entries = database.query_all(ContextModel) @@ -271,15 +259,16 @@ def test_grpc_context( # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 - """ def test_grpc_topology( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] + context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name + context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_s_mb[0] + + database = Database(session) # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() + database.clear() # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector(context_client_grpc) @@ -288,32 +277,30 @@ def test_grpc_topology( # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client_grpc.SetContext(Context(**CONTEXT)) assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - event = events_collector.get_event(block=True) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, ContextEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Get when the object does not exist ------------------------------------------------------------------------- with pytest.raises(grpc.RpcError) as e: context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) - + # assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) + assert e.value.details() == 'Topology({:s}) not found'.format(DEFAULT_TOPOLOGY_UUID) # ----- List when the object does not exist ------------------------------------------------------------------------ response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 0 - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) assert len(response.topologies) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.query_all(TopologyModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 + assert len(db_entries) == 0 # ----- Create the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) @@ -326,16 +313,16 @@ def test_grpc_topology( assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Check create event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=2) + # events = events_collector.get_events(block=True, count=2) - assert isinstance(events[0], TopologyEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # assert isinstance(events[0], TopologyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert isinstance(events[1], ContextEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert isinstance(events[1], ContextEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Update the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) @@ -343,19 +330,19 @@ def test_grpc_topology( assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, TopologyEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.query_all(TopologyModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 5 + assert len(db_entries) == 1 # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) @@ -382,28 +369,29 @@ def test_grpc_topology( context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=2) + # events = events_collector.get_events(block=True, count=2) - assert isinstance(events[0], TopologyEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # assert isinstance(events[0], TopologyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert isinstance(events[1], ContextEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert isinstance(events[1], ContextEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + # events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() + db_entries = database.query_all(TopologyModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 + """ def test_grpc_device( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name -- GitLab From 979f3d4124a443b0bcbeb8a3b1e3e19030b373eb Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Tue, 13 Sep 2022 16:24:46 +0200 Subject: [PATCH 003/158] Topology model updated to SQLAlchemy --- .../service/grpc_server/ContextServiceServicerImpl.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index bf51bf316..9952444b7 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -195,7 +195,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetTopology(self, request: TopologyId, contextt : grpc.ServicerContext) -> Topology: + def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: context_uuid = request.context_id.context_uuid.uuid topology_uuid = request.topology_uuid.uuid @@ -215,10 +215,9 @@ class ContextServiceServicerImpl(ContextServiceServicer): with self.session() as session: db_context: ContextModel = session.query(ContextModel).filter_by(context_uuid=context_uuid).one() - topology_add = TopologyModel(topology_uuid=topology_uuid, context_fk=context_uuid) - topology_add.context = db_context - updated = True - with self.session() as session: + topology_add = TopologyModel(topology_uuid=topology_uuid, context_fk=context_uuid) + topology_add.context = db_context + updated = True result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).options(contains_eager(TopologyModel.context)).one_or_none() if not result: -- GitLab From 0406cd2766ead8b98c7d4c75ae6f06dab0f12697 Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Tue, 20 Sep 2022 15:02:51 +0200 Subject: [PATCH 004/158] Topology model updated to SQLAlchemy --- src/context/service/__main__.py | 2 +- src/context/service/database/TopologyModel.py | 6 ++-- .../grpc_server/ContextServiceServicerImpl.py | 36 +++++++++---------- src/context/tests/test_unitary.py | 2 +- 4 files changed, 21 insertions(+), 25 deletions(-) diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 154c8ff00..937059202 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -52,7 +52,7 @@ def main(): start_http_server(metrics_port) # Get database instance - db_uri = 'cockroachdb://root@10.152.183.121:26257/defaultdb?sslmode=disable' + db_uri = 'cockroachdb://root@10.152.183.66:26257/defaultdb?sslmode=disable' LOGGER.debug('Connecting to DB: {}'.format(db_uri)) # engine = create_engine(db_uri, echo=False) diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py index 9f117c73c..ec8427b07 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/TopologyModel.py @@ -27,11 +27,11 @@ LOGGER = logging.getLogger(__name__) class TopologyModel(Base): __tablename__ = 'Topology' - context_fk = Column(UUID(as_uuid=False), ForeignKey("Context.context_uuid"), nullable=False) - topology_uuid = Column(UUID(as_uuid=False), primary_key=True, nullable=False) + context_uuid = Column(UUID(as_uuid=False), ForeignKey("Context.context_uuid"), primary_key=True) + topology_uuid = Column(UUID(as_uuid=False), primary_key=True) # Relationships - context = relationship("ContextModel", back_populates="topology", lazy="joined") + context = relationship("ContextModel", back_populates="topology", lazy="subquery") def dump_id(self) -> Dict: context_id = self.context.dump_id() diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 9952444b7..5439b6c06 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -170,11 +170,10 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: - with self.lock: - context_uuid = request.context_uuid.uuid + context_uuid = request.context_uuid.uuid - with self.session() as session: - result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + with self.session() as session: + result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() if not result: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) @@ -188,11 +187,11 @@ class ContextServiceServicerImpl(ContextServiceServicer): with self.session() as session: result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by( context_uuid=context_uuid).one_or_none() - if not result: - raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + if not result: + raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - db_topologies = result.topology - return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) + db_topologies = result.topology + return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: @@ -213,22 +212,19 @@ class ContextServiceServicerImpl(ContextServiceServicer): context_uuid = request.topology_id.context_id.context_uuid.uuid topology_uuid = request.topology_id.topology_uuid.uuid with self.session() as session: - db_context: ContextModel = session.query(ContextModel).filter_by(context_uuid=context_uuid).one() - - topology_add = TopologyModel(topology_uuid=topology_uuid, context_fk=context_uuid) - topology_add.context = db_context + topology_add = TopologyModel(topology_uuid=topology_uuid, context_uuid=context_uuid) updated = True - result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).options(contains_eager(TopologyModel.context)).one_or_none() - + result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() if not result: updated = False session.merge(topology_add) session.commit() + result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_topology_id = topology_add.dump_id() - notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) - return TopologyId(**dict_topology_id) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_topology_id = result.dump_id() + notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) + return TopologyId(**dict_topology_id) @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: @@ -236,12 +232,12 @@ class ContextServiceServicerImpl(ContextServiceServicer): topology_uuid = request.topology_uuid.uuid with self.session() as session: - result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_fk=context_uuid).one_or_none() + result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).one_or_none() if not result: return Empty() dict_topology_id = result.dump_id() - session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_fk=context_uuid).delete() + session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).delete() session.commit() event_type = EventTypeEnum.EVENTTYPE_REMOVE notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index b7a9cee92..e202de498 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -84,7 +84,7 @@ def context_s_mb(request) -> Tuple[Session, MessageBroker]: msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - db_uri = 'cockroachdb://root@10.152.183.121:26257/defaultdb?sslmode=disable' + db_uri = 'cockroachdb://root@10.152.183.66:26257/defaultdb?sslmode=disable' LOGGER.debug('Connecting to DB: {}'.format(db_uri)) try: -- GitLab From 24301258560fa43cbf981abc472c311b492aa94e Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Fri, 23 Sep 2022 12:36:30 +0200 Subject: [PATCH 005/158] Topology model updated to SQLAlchemy --- src/context/service/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 937059202..93c0e4748 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -52,7 +52,7 @@ def main(): start_http_server(metrics_port) # Get database instance - db_uri = 'cockroachdb://root@10.152.183.66:26257/defaultdb?sslmode=disable' + db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable' LOGGER.debug('Connecting to DB: {}'.format(db_uri)) # engine = create_engine(db_uri, echo=False) -- GitLab From facab6d65b6413e462284a0c1e49e1fc4cf00bba Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Mon, 3 Oct 2022 08:54:36 +0200 Subject: [PATCH 006/158] Device model updated to SQLAlchemy --- src/context/service/Database.py | 89 +++++- src/context/service/__main__.py | 2 +- src/context/service/database/ConfigModel.py | 87 ++++-- src/context/service/database/ContextModel.py | 3 + src/context/service/database/DeviceModel.py | 104 ++++--- src/context/service/database/EndPointModel.py | 54 ++-- src/context/service/database/KpiSampleType.py | 4 +- src/context/service/database/Tools.py | 3 +- src/context/service/database/TopologyModel.py | 13 +- .../grpc_server/ContextServiceServicerImpl.py | 280 ++++++++++++------ src/context/tests/Objects.py | 13 +- src/context/tests/test_unitary.py | 119 ++++---- 12 files changed, 507 insertions(+), 264 deletions(-) diff --git a/src/context/service/Database.py b/src/context/service/Database.py index 281761ed8..8fae9f652 100644 --- a/src/context/service/Database.py +++ b/src/context/service/Database.py @@ -1,6 +1,12 @@ +from typing import Tuple, List + +from sqlalchemy import MetaData from sqlalchemy.orm import Session from context.service.database.Base import Base import logging +from common.orm.backend.Tools import key_to_str + +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException LOGGER = logging.getLogger(__name__) @@ -10,7 +16,7 @@ class Database(Session): super().__init__() self.session = session - def query_all(self, model): + def get_all(self, model): result = [] with self.session() as session: for entry in session.query(model).all(): @@ -18,11 +24,88 @@ class Database(Session): return result - def get_object(self): - pass + def create_or_update(self, model): + with self.session() as session: + att = getattr(model, model.main_pk_name()) + filt = {model.main_pk_name(): att} + found = session.query(type(model)).filter_by(**filt).one_or_none() + if found: + found = True + else: + found = False + + session.merge(model) + session.commit() + return model, found + + def create(self, model): + with self.session() as session: + session.add(model) + session.commit() + return model + + def remove(self, model, filter_d): + model_t = type(model) + with self.session() as session: + session.query(model_t).filter_by(**filter_d).delete() + session.commit() + def clear(self): with self.session() as session: engine = session.get_bind() Base.metadata.drop_all(engine) Base.metadata.create_all(engine) + + def dump_by_table(self): + with self.session() as session: + engine = session.get_bind() + meta = MetaData() + meta.reflect(engine) + result = {} + + for table in meta.sorted_tables: + result[table.name] = [dict(row) for row in engine.execute(table.select())] + LOGGER.info(result) + return result + + def dump_all(self): + with self.session() as session: + engine = session.get_bind() + meta = MetaData() + meta.reflect(engine) + result = [] + + for table in meta.sorted_tables: + for row in engine.execute(table.select()): + result.append((table.name, dict(row))) + LOGGER.info(result) + + return result + + def get_object(self, model_class: Base, main_key: str, raise_if_not_found=False): + filt = {model_class.main_pk_name(): main_key} + with self.session() as session: + get = session.query(model_class).filter_by(**filt).one_or_none() + + if not get: + if raise_if_not_found: + raise NotFoundException(model_class.__name__.replace('Model', ''), main_key) + + return get + def get_or_create(self, model_class: Base, key_parts: List[str] + ) -> Tuple[Base, bool]: + + str_key = key_to_str(key_parts) + filt = {model_class.main_pk_name(): key_parts} + with self.session() as session: + get = session.query(model_class).filter_by(**filt).one_or_none() + if get: + return get, False + else: + obj = model_class() + setattr(obj, model_class.main_pk_name(), str_key) + LOGGER.info(obj.dump()) + session.add(obj) + session.commit() + return obj, True diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 93c0e4748..9fc2f2357 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -65,7 +65,7 @@ def main(): return 1 Base.metadata.create_all(engine) - session = sessionmaker(bind=engine) + session = sessionmaker(bind=engine, expire_on_commit=False) # Get message broker instance messagebroker = MessageBroker(get_messagebroker_backend()) diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py index bb2a37467..4dcd50c2c 100644 --- a/src/context/service/database/ConfigModel.py +++ b/src/context/service/database/ConfigModel.py @@ -11,26 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import enum import functools, logging, operator -from enum import Enum from typing import Dict, List, Optional, Tuple, Union -from common.orm.Database import Database -from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object from common.orm.backend.Tools import key_to_str -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.IntegerField import IntegerField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model from common.proto.context_pb2 import ConfigActionEnum from common.tools.grpc.Tools import grpc_message_to_json_string +from sqlalchemy import Column, ForeignKey, INTEGER, CheckConstraint, Enum, String +from sqlalchemy.dialects.postgresql import UUID, ARRAY +from context.service.database.Base import Base +from sqlalchemy.orm import relationship +from context.service.Database import Database + from .Tools import fast_hasher, grpc_to_enum, remove_dict_key LOGGER = logging.getLogger(__name__) -class ORM_ConfigActionEnum(Enum): +class ORM_ConfigActionEnum(enum.Enum): UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED SET = ConfigActionEnum.CONFIGACTION_SET DELETE = ConfigActionEnum.CONFIGACTION_DELETE @@ -38,27 +35,47 @@ class ORM_ConfigActionEnum(Enum): grpc_to_enum__config_action = functools.partial( grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum) -class ConfigModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() +class ConfigModel(Base): # pylint: disable=abstract-method + __tablename__ = 'Config' + config_uuid = Column(UUID(as_uuid=False), primary_key=True) + + # Relationships + config_rule = relationship("ConfigRuleModel", back_populates="config", lazy="dynamic") + def delete(self) -> None: db_config_rule_pks = self.references(ConfigRuleModel) for pk,_ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() super().delete() - def dump(self) -> List[Dict]: - db_config_rule_pks = self.references(ConfigRuleModel) - config_rules = [ConfigRuleModel(self.database, pk).dump(include_position=True) for pk,_ in db_config_rule_pks] - config_rules = sorted(config_rules, key=operator.itemgetter('position')) + def dump(self): # -> List[Dict]: + config_rules = [] + for a in self.config_rule: + asdf = a.dump() + config_rules.append(asdf) return [remove_dict_key(config_rule, 'position') for config_rule in config_rules] -class ConfigRuleModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - config_fk = ForeignKeyField(ConfigModel) - position = IntegerField(min_value=0, required=True) - action = EnumeratedField(ORM_ConfigActionEnum, required=True) - key = StringField(required=True, allow_empty=False) - value = StringField(required=True, allow_empty=False) + @staticmethod + def main_pk_name(): + return 'config_uuid' + +class ConfigRuleModel(Base): # pylint: disable=abstract-method + __tablename__ = 'ConfigRule' + config_rule_uuid = Column(UUID(as_uuid=False), primary_key=True) + config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid"), primary_key=True) + + action = Column(Enum(ORM_ConfigActionEnum, create_constraint=True, native_enum=True), nullable=False) + position = Column(INTEGER, nullable=False) + key = Column(String, nullable=False) + value = Column(String, nullable=False) + + __table_args__ = ( + CheckConstraint(position >= 0, name='check_position_value'), + {} + ) + + # Relationships + config = relationship("ConfigModel", back_populates="config_rule") def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ result = { @@ -71,17 +88,23 @@ class ConfigRuleModel(Model): # pylint: disable=abstract-method if include_position: result['position'] = self.position return result + @staticmethod + def main_pk_name(): + return 'config_rule_uuid' + def set_config_rule( - database : Database, db_config : ConfigModel, position : int, resource_key : str, resource_value : str -) -> Tuple[ConfigRuleModel, bool]: + database : Database, db_config : ConfigModel, position : int, resource_key : str, resource_value : str, +): # -> Tuple[ConfigRuleModel, bool]: str_rule_key_hash = fast_hasher(resource_key) - str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') - result : Tuple[ConfigRuleModel, bool] = update_or_create_object(database, ConfigRuleModel, str_config_rule_key, { - 'config_fk': db_config, 'position': position, 'action': ORM_ConfigActionEnum.SET, - 'key': resource_key, 'value': resource_value}) - db_config_rule, updated = result - return db_config_rule, updated + str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') + + data = {'config_fk': db_config, 'position': position, 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, + 'value': resource_value} + to_add = ConfigRuleModel(**data) + + result = database.create_or_update(to_add) + return result def delete_config_rule( database : Database, db_config : ConfigModel, resource_key : str diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index 77a95ea03..ef1d485be 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -33,6 +33,9 @@ class ContextModel(Base): def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} + def main_pk_name(self): + return 'context_uuid' + """ def dump_service_ids(self) -> List[Dict]: from .ServiceModel import ServiceModel # pylint: disable=import-outside-toplevel diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py index 0d4232679..bf8f73c79 100644 --- a/src/context/service/database/DeviceModel.py +++ b/src/context/service/database/DeviceModel.py @@ -11,24 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import enum import functools, logging -from enum import Enum +import uuid from typing import Dict, List from common.orm.Database import Database from common.orm.backend.Tools import key_to_str -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum -from .ConfigModel import ConfigModel +from sqlalchemy import Column, ForeignKey, String, Enum +from sqlalchemy.dialects.postgresql import UUID, ARRAY +from context.service.database.Base import Base +from sqlalchemy.orm import relationship from .Tools import grpc_to_enum LOGGER = logging.getLogger(__name__) -class ORM_DeviceDriverEnum(Enum): +class ORM_DeviceDriverEnum(enum.Enum): UNDEFINED = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED OPENCONFIG = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG TRANSPORT_API = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API @@ -39,7 +37,7 @@ class ORM_DeviceDriverEnum(Enum): grpc_to_enum__device_driver = functools.partial( grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) -class ORM_DeviceOperationalStatusEnum(Enum): +class ORM_DeviceOperationalStatusEnum(enum.Enum): UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED DISABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED @@ -47,48 +45,51 @@ class ORM_DeviceOperationalStatusEnum(Enum): grpc_to_enum__device_operational_status = functools.partial( grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum) -class DeviceModel(Model): - pk = PrimaryKeyField() - device_uuid = StringField(required=True, allow_empty=False) - device_type = StringField() - device_config_fk = ForeignKeyField(ConfigModel) - device_operational_status = EnumeratedField(ORM_DeviceOperationalStatusEnum, required=True) - - def delete(self) -> None: - # pylint: disable=import-outside-toplevel - from .EndPointModel import EndPointModel - from .RelationModels import TopologyDeviceModel - - for db_endpoint_pk,_ in self.references(EndPointModel): - EndPointModel(self.database, db_endpoint_pk).delete() - - for db_topology_device_pk,_ in self.references(TopologyDeviceModel): - TopologyDeviceModel(self.database, db_topology_device_pk).delete() - - for db_driver_pk,_ in self.references(DriverModel): - DriverModel(self.database, db_driver_pk).delete() - - super().delete() - - ConfigModel(self.database, self.device_config_fk).delete() +class DeviceModel(Base): + __tablename__ = 'Device' + device_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_type = Column(String) + device_config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid")) + device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum, create_constraint=False, + native_enum=False)) + + # Relationships + device_config = relationship("ConfigModel", lazy="joined") + driver = relationship("DriverModel", lazy="joined") + endpoints = relationship("EndPointModel", lazy="joined") + + # def delete(self) -> None: + # # pylint: disable=import-outside-toplevel + # from .EndPointModel import EndPointModel + # from .RelationModels import TopologyDeviceModel + # + # for db_endpoint_pk,_ in self.references(EndPointModel): + # EndPointModel(self.database, db_endpoint_pk).delete() + # + # for db_topology_device_pk,_ in self.references(TopologyDeviceModel): + # TopologyDeviceModel(self.database, db_topology_device_pk).delete() + # + # for db_driver_pk,_ in self.references(DriverModel): + # DriverModel(self.database, db_driver_pk).delete() + # + # super().delete() + # + # ConfigModel(self.database, self.device_config_fk).delete() def dump_id(self) -> Dict: return {'device_uuid': {'uuid': self.device_uuid}} def dump_config(self) -> Dict: - return ConfigModel(self.database, self.device_config_fk).dump() + return self.device_config.dump() def dump_drivers(self) -> List[int]: - db_driver_pks = self.references(DriverModel) - return [DriverModel(self.database, pk).dump() for pk,_ in db_driver_pks] + return self.driver.dump() def dump_endpoints(self) -> List[Dict]: - from .EndPointModel import EndPointModel # pylint: disable=import-outside-toplevel - db_endpoints_pks = self.references(EndPointModel) - return [EndPointModel(self.database, pk).dump() for pk,_ in db_endpoints_pks] + return self.endpoints.dump() def dump( # pylint: disable=arguments-differ - self, include_config_rules=True, include_drivers=True, include_endpoints=True + self, include_config_rules=True, include_drivers=False, include_endpoints=False ) -> Dict: result = { 'device_id': self.dump_id(), @@ -100,16 +101,27 @@ class DeviceModel(Model): if include_endpoints: result['device_endpoints'] = self.dump_endpoints() return result -class DriverModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - device_fk = ForeignKeyField(DeviceModel) - driver = EnumeratedField(ORM_DeviceDriverEnum, required=True) + def main_pk_name(self): + return 'device_uuid' + +class DriverModel(Base): # pylint: disable=abstract-method + __tablename__ = 'Driver' + driver_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid"), primary_key=True) + driver = Column(Enum(ORM_DeviceDriverEnum, create_constraint=False, native_enum=False)) + + # Relationships + device = relationship("DeviceModel") + def dump(self) -> Dict: return self.driver.value + def main_pk_name(self): + return 'driver_uuid' + def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers): - db_device_pk = db_device.pk + db_device_pk = db_device.device_uuid for driver in grpc_device_drivers: orm_driver = grpc_to_enum__device_driver(driver) str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py index aeef91b65..669b590e3 100644 --- a/src/context/service/database/EndPointModel.py +++ b/src/context/service/database/EndPointModel.py @@ -17,24 +17,25 @@ from typing import Dict, List, Optional, Tuple from common.orm.Database import Database from common.orm.HighLevel import get_object from common.orm.backend.Tools import key_to_str -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model from common.proto.context_pb2 import EndPointId -from .DeviceModel import DeviceModel from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type -from .TopologyModel import TopologyModel - +from sqlalchemy import Column, ForeignKey, String, Enum, ForeignKeyConstraint +from sqlalchemy.dialects.postgresql import UUID, ARRAY +from context.service.database.Base import Base +from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) -class EndPointModel(Model): - pk = PrimaryKeyField() - topology_fk = ForeignKeyField(TopologyModel, required=False) - device_fk = ForeignKeyField(DeviceModel) - endpoint_uuid = StringField(required=True, allow_empty=False) - endpoint_type = StringField() +class EndPointModel(Base): + __tablename__ = 'EndPoint' + endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid"), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid"), primary_key=True) + endpoint_type = Column(String) + + # Relationships + + def main_pk_name(self): + return 'endpoint_uuid' def delete(self) -> None: for db_kpi_sample_type_pk,_ in self.references(KpiSampleTypeModel): @@ -42,13 +43,10 @@ class EndPointModel(Model): super().delete() def dump_id(self) -> Dict: - device_id = DeviceModel(self.database, self.device_fk).dump_id() result = { - 'device_id': device_id, + 'device_uuid': self.device_uuid, 'endpoint_uuid': {'uuid': self.endpoint_uuid}, } - if self.topology_fk is not None: - result['topology_id'] = TopologyModel(self.database, self.topology_fk).dump_id() return result def dump_kpi_sample_types(self) -> List[int]: @@ -59,20 +57,26 @@ class EndPointModel(Model): self, include_kpi_sample_types=True ) -> Dict: result = { - 'endpoint_id': self.dump_id(), + 'endpoint_uuid': self.dump_id(), 'endpoint_type': self.endpoint_type, } if include_kpi_sample_types: result['kpi_sample_types'] = self.dump_kpi_sample_types() return result -class KpiSampleTypeModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - endpoint_fk = ForeignKeyField(EndPointModel) - kpi_sample_type = EnumeratedField(ORM_KpiSampleTypeEnum, required=True) - +class KpiSampleTypeModel(Base): # pylint: disable=abstract-method + __tablename__ = 'KpiSampleType' + kpi_uuid = Column(UUID(as_uuid=False), primary_key=True) + endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) + kpi_sample_type = Column(Enum(ORM_KpiSampleTypeEnum, create_constraint=False, + native_enum=False)) + # __table_args__ = (ForeignKeyConstraint([endpoint_uuid], [EndPointModel.endpoint_uuid]), {}) def dump(self) -> Dict: return self.kpi_sample_type.value + def main_pk_name(self): + return 'kpi_uuid' + +""" def set_kpi_sample_types(database : Database, db_endpoint : EndPointModel, grpc_endpoint_kpi_sample_types): db_endpoint_pk = db_endpoint.pk for kpi_sample_type in grpc_endpoint_kpi_sample_types: @@ -82,7 +86,7 @@ def set_kpi_sample_types(database : Database, db_endpoint : EndPointModel, grpc_ db_endpoint_kpi_sample_type.endpoint_fk = db_endpoint db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type db_endpoint_kpi_sample_type.save() - +""" def get_endpoint( database : Database, grpc_endpoint_id : EndPointId, validate_topology_exists : bool = True, validate_device_in_topology : bool = True diff --git a/src/context/service/database/KpiSampleType.py b/src/context/service/database/KpiSampleType.py index 0a2015b3f..7f122f185 100644 --- a/src/context/service/database/KpiSampleType.py +++ b/src/context/service/database/KpiSampleType.py @@ -13,11 +13,11 @@ # limitations under the License. import functools -from enum import Enum +import enum from common.proto.kpi_sample_types_pb2 import KpiSampleType from .Tools import grpc_to_enum -class ORM_KpiSampleTypeEnum(Enum): +class ORM_KpiSampleTypeEnum(enum.Enum): UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED diff --git a/src/context/service/database/Tools.py b/src/context/service/database/Tools.py index 43bb71bd9..44a5aa264 100644 --- a/src/context/service/database/Tools.py +++ b/src/context/service/database/Tools.py @@ -15,8 +15,9 @@ import hashlib, re from enum import Enum from typing import Dict, List, Tuple, Union - +import logging # Convenient helper function to remove dictionary items in dict/list/set comprehensions. +LOGGER = logging.getLogger(__name__) def remove_dict_key(dictionary : Dict, key : str): dictionary.pop(key, None) diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py index ec8427b07..2925a27fa 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/TopologyModel.py @@ -14,11 +14,6 @@ import logging, operator from typing import Dict, List -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from common.orm.HighLevel import get_related_objects from sqlalchemy.orm import relationship from sqlalchemy import Column, ForeignKey from sqlalchemy.dialects.postgresql import UUID @@ -28,10 +23,10 @@ LOGGER = logging.getLogger(__name__) class TopologyModel(Base): __tablename__ = 'Topology' context_uuid = Column(UUID(as_uuid=False), ForeignKey("Context.context_uuid"), primary_key=True) - topology_uuid = Column(UUID(as_uuid=False), primary_key=True) + topology_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) # Relationships - context = relationship("ContextModel", back_populates="topology", lazy="subquery") + context = relationship("ContextModel", back_populates="topology", lazy="joined") def dump_id(self) -> Dict: context_id = self.context.dump_id() @@ -40,6 +35,10 @@ class TopologyModel(Base): 'topology_uuid': {'uuid': self.topology_uuid}, } + @staticmethod + def main_pk_name() -> str: + return 'topology_uuid' + """def dump_device_ids(self) -> List[Dict]: from .RelationModels import TopologyDeviceModel # pylint: disable=import-outside-toplevel db_devices = get_related_objects(self, TopologyDeviceModel, 'device_fk') diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 5439b6c06..d104d5567 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -11,9 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import uuid import grpc, json, logging, operator, threading -from typing import Iterator, List, Set, Tuple +from typing import Iterator, List, Set, Tuple, Union from common.message_broker.MessageBroker import MessageBroker from context.service.Database import Database @@ -25,19 +26,24 @@ from common.proto.context_pb2 import ( Link, LinkEvent, LinkId, LinkIdList, LinkList, Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList, Slice, SliceEvent, SliceId, SliceIdList, SliceList, - Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList) + Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList, + ConfigActionEnum) from common.proto.context_pb2_grpc import ContextServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException from sqlalchemy.orm import Session, contains_eager, selectinload from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from context.service.database.ConfigModel import grpc_config_rules_to_raw +from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel +from context.service.database.ConfigModel import ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel +from common.orm.backend.Tools import key_to_str + +from ..database.KpiSampleType import grpc_to_enum__kpi_sample_type """ -from context.service.database.ConfigModel import grpc_config_rules_to_raw, update_config from context.service.database.ConnectionModel import ConnectionModel, set_path from context.service.database.ConstraintModel import set_constraints -from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types from context.service.database.Events import notify_event from context.service.database.LinkModel import LinkModel @@ -51,8 +57,9 @@ from context.service.database.TopologyModel import TopologyModel """ from context.service.database.ContextModel import ContextModel from context.service.database.TopologyModel import TopologyModel -# from context.service.database.TopologyModel import TopologyModel from context.service.database.Events import notify_event +from context.service.database.EndPointModel import EndPointModel +from context.service.database.EndPointModel import KpiSampleTypeModel from .Constants import ( CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, @@ -201,10 +208,10 @@ class ContextServiceServicerImpl(ContextServiceServicer): with self.session() as session: result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).options(contains_eager(TopologyModel.context)).one_or_none() - if not result: - raise NotFoundException(TopologyModel.__name__.replace('Model', ''), topology_uuid) + if not result: + raise NotFoundException(TopologyModel.__name__.replace('Model', ''), topology_uuid) - return Topology(**result.dump()) + return Topology(**result.dump()) @safe_and_metered_rpc_method(METRICS, LOGGER) @@ -247,97 +254,201 @@ class ContextServiceServicerImpl(ContextServiceServicer): def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): yield TopologyEvent(**json.loads(message.content)) - """ # ----- Device ----------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList: - with self.lock: - db_devices : List[DeviceModel] = get_all_objects(self.database, DeviceModel) - db_devices = sorted(db_devices, key=operator.attrgetter('pk')) - return DeviceIdList(device_ids=[db_device.dump_id() for db_device in db_devices]) + with self.session() as session: + result = session.query(DeviceModel).all() + return DeviceIdList(device_ids=[device.dump_id() for device in result]) @safe_and_metered_rpc_method(METRICS, LOGGER) def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList: - with self.lock: - db_devices : List[DeviceModel] = get_all_objects(self.database, DeviceModel) - db_devices = sorted(db_devices, key=operator.attrgetter('pk')) - return DeviceList(devices=[db_device.dump() for db_device in db_devices]) + with self.session() as session: + result = session.query(DeviceModel).all() + return DeviceList(devices=[device.dump_id() for device in result]) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device: - with self.lock: - device_uuid = request.device_uuid.uuid - db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid) - return Device(**db_device.dump( - include_config_rules=True, include_drivers=True, include_endpoints=True)) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId: - with self.lock: - device_uuid = request.device_id.device_uuid.uuid - - for i,endpoint in enumerate(request.device_endpoints): - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid - if device_uuid != endpoint_device_uuid: - raise InvalidArgumentException( - 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, - ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) - - config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) - running_config_result = update_config(self.database, device_uuid, 'running', config_rules) - db_running_config = running_config_result[0][0] - - result : Tuple[DeviceModel, bool] = update_or_create_object(self.database, DeviceModel, device_uuid, { - 'device_uuid' : device_uuid, - 'device_type' : request.device_type, - 'device_operational_status': grpc_to_enum__device_operational_status(request.device_operational_status), - 'device_config_fk' : db_running_config, - }) - db_device, updated = result - - set_drivers(self.database, db_device, request.device_drivers) - - for i,endpoint in enumerate(request.device_endpoints): - endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid - - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) - endpoint_attributes = { - 'device_fk' : db_device, - 'endpoint_uuid': endpoint_uuid, - 'endpoint_type': endpoint.endpoint_type, - } - - endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - db_topology : TopologyModel = get_object(self.database, TopologyModel, str_topology_key) - - str_topology_device_key = key_to_str([str_topology_key, device_uuid], separator='--') - result : Tuple[TopologyDeviceModel, bool] = get_or_create_object( - self.database, TopologyDeviceModel, str_topology_device_key, { - 'topology_fk': db_topology, 'device_fk': db_device}) - #db_topology_device, topology_device_created = result + device_uuid = request.device_uuid.uuid + with self.session() as session: + result = session.query(DeviceModel).filter(DeviceModel.device_uuid == device_uuid).one_or_none() + if not result: + raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - endpoint_attributes['topology_fk'] = db_topology + rd = result.dump() + rt = Device(**rd) - result : Tuple[EndPointModel, bool] = update_or_create_object( - self.database, EndPointModel, str_endpoint_key, endpoint_attributes) - db_endpoint, endpoint_updated = result + return rt - set_kpi_sample_types(self.database, db_endpoint, endpoint.kpi_sample_types) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId: + device_uuid = request.device_id.device_uuid.uuid - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_device_id = db_device.dump_id() - notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) - return DeviceId(**dict_device_id) + for i,endpoint in enumerate(request.device_endpoints): + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + if device_uuid != endpoint_device_uuid: + raise InvalidArgumentException( + 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, + ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) + + config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) + running_config_result = self.update_config(device_uuid, 'running', config_rules) + db_running_config = running_config_result[0][0] + config_uuid = db_running_config.config_uuid + + new_obj = DeviceModel(**{ + 'device_uuid' : device_uuid, + 'device_type' : request.device_type, + 'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status), + 'device_config_uuid' : config_uuid, + }) + result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj) + db_device, updated = result + + self.set_drivers(db_device, request.device_drivers) + + for i,endpoint in enumerate(request.device_endpoints): + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + + str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) + endpoint_attributes = { + 'device_uuid' : db_device.device_uuid, + 'endpoint_uuid': endpoint_uuid, + 'endpoint_type': endpoint.endpoint_type, + } + + endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid + endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid + if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: + str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) + + db_topology : TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) + + str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') + endpoint_attributes['topology_uuid'] = db_topology.topology_uuid + + new_endpoint = EndPointModel(**endpoint_attributes) + result : Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) + db_endpoint, updated = result + + self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types) + + # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_device_id = db_device.dump_id() + # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) + + return DeviceId(**dict_device_id) + + def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types): + db_endpoint_pk = db_endpoint.endpoint_uuid + for kpi_sample_type in grpc_endpoint_kpi_sample_types: + orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) + # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name]) + data = {'endpoint_uuid': db_endpoint_pk, + 'kpi_sample_type': orm_kpi_sample_type.name, + 'kpi_uuid': str(uuid.uuid4())} + db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data) + self.database.create(db_endpoint_kpi_sample_type) + + def set_drivers(self, db_device: DeviceModel, grpc_device_drivers): + db_device_pk = db_device.device_uuid + for driver in grpc_device_drivers: + orm_driver = grpc_to_enum__device_driver(driver) + str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) + driver_config = { + "driver_uuid": str(uuid.uuid4()), + "device_uuid": db_device_pk, + "driver": orm_driver.name + } + db_device_driver = DriverModel(**driver_config) + db_device_driver.device_fk = db_device + db_device_driver.driver = orm_driver + + self.database.create_or_update(db_device_driver) + + def update_config( + self, db_parent_pk: str, config_name: str, + raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]] + ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: + + str_config_key = key_to_str([db_parent_pk, config_name], separator=':') + result = self.database.get_or_create(ConfigModel, db_parent_pk) + db_config, created = result + + LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump())) + + db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)] + + for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): + if action == ORM_ConfigActionEnum.SET: + result : Tuple[ConfigRuleModel, bool] = self.set_config_rule( + db_config, position, resource_key, resource_value) + db_config_rule, updated = result + db_objects.append((db_config_rule, updated)) + elif action == ORM_ConfigActionEnum.DELETE: + self.delete_config_rule(db_config, resource_key) + else: + msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' + raise AttributeError( + msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) + + return db_objects + + def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str, + ): # -> Tuple[ConfigRuleModel, bool]: + + from src.context.service.database.Tools import fast_hasher + str_rule_key_hash = fast_hasher(resource_key) + str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') + pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key)) + data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position, + 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value} + to_add = ConfigRuleModel(**data) + + result, updated = self.database.create_or_update(to_add) + return result, updated + + def delete_config_rule( + self, db_config: ConfigModel, resource_key: str + ) -> None: + + from src.context.service.database.Tools import fast_hasher + str_rule_key_hash = fast_hasher(resource_key) + str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') + + db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) + + if db_config_rule is None: + return + db_config_rule.delete() + + def delete_all_config_rules(self, db_config: ConfigModel) -> None: + + db_config_rule_pks = db_config.references(ConfigRuleModel) + for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() + + """ + for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): + if action == ORM_ConfigActionEnum.SET: + result: Tuple[ConfigRuleModel, bool] = set_config_rule( + database, db_config, position, resource_key, resource_value) + db_config_rule, updated = result + db_objects.append((db_config_rule, updated)) + elif action == ORM_ConfigActionEnum.DELETE: + delete_config_rule(database, db_config, resource_key) + else: + msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' + raise AttributeError( + msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) + + return db_objects + """ @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty: @@ -360,6 +471,9 @@ class ContextServiceServicerImpl(ContextServiceServicer): yield DeviceEvent(**json.loads(message.content)) + + """ + # ----- Link ------------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index 519a0093a..772da38e0 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -45,12 +45,17 @@ PACKET_PORT_SAMPLE_TYPES = [ # ----- Device --------------------------------------------------------------------------------------------------------- -DEVICE_R1_UUID = 'R1' +EP2 = '7eb80584-2587-4e71-b10c-f3a5c48e84ab' +EP3 = '368baf47-0540-4ab4-add8-a19b5167162c' +EP100 = '6a923121-36e1-4b5e-8cd6-90aceca9b5cf' + + +DEVICE_R1_UUID = 'fe83a200-6ded-47b4-b156-3bb3556a10d6' DEVICE_R1_ID = json_device_id(DEVICE_R1_UUID) DEVICE_R1_EPS = [ - json_endpoint(DEVICE_R1_ID, 'EP2', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R1_ID, 'EP3', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R1_ID, 'EP100', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R1_ID, EP2, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R1_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R1_ID, EP100, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), ] DEVICE_R1_RULES = [ json_config_rule_set('dev/rsrc1/value', 'value1'), diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index e202de498..f238e95d9 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -20,7 +20,6 @@ from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, get_service_baseurl_http, get_service_port_grpc, get_service_port_http) from context.service.Database import Database -from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2 import ( @@ -84,7 +83,7 @@ def context_s_mb(request) -> Tuple[Session, MessageBroker]: msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - db_uri = 'cockroachdb://root@10.152.183.66:26257/defaultdb?sslmode=disable' + db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable' LOGGER.debug('Connecting to DB: {}'.format(db_uri)) try: @@ -95,7 +94,7 @@ def context_s_mb(request) -> Tuple[Session, MessageBroker]: return 1 Base.metadata.create_all(engine) - _session = sessionmaker(bind=engine) + _session = sessionmaker(bind=engine, expire_on_commit=False) _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings)) yield _session, _message_broker @@ -164,7 +163,7 @@ def test_grpc_context( assert len(response.contexts) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.query_all(ContextModel) + db_entries = database.get_all(ContextModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover @@ -214,7 +213,7 @@ def test_grpc_context( assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.query_all(ContextModel) + db_entries = database.get_all(ContextModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) # for db_entry in db_entries: @@ -252,7 +251,7 @@ def test_grpc_context( events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.query_all(ContextModel) + db_entries = database.get_all(ContextModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) # for db_entry in db_entries: @@ -295,7 +294,7 @@ def test_grpc_topology( assert len(response.topologies) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.query_all(TopologyModel) + db_entries = database.get_all(TopologyModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) # for db_entry in db_entries: # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover @@ -337,7 +336,7 @@ def test_grpc_topology( # assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.query_all(TopologyModel) + db_entries = database.get_all(TopologyModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) # for db_entry in db_entries: # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover @@ -384,22 +383,22 @@ def test_grpc_topology( # events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.query_all(TopologyModel) + db_entries = database.get_all(TopologyModel) LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) # for db_entry in db_entries: # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 - """ - def test_grpc_device( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] + context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name + context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_s_mb[0] + + database = Database(session) # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() + database.clear() # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector(context_client_grpc) @@ -438,49 +437,49 @@ def test_grpc_device( assert len(response.devices) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 5 + assert len(db_entries) == 2 # ----- Create the object ------------------------------------------------------------------------------------------ with pytest.raises(grpc.RpcError) as e: WRONG_DEVICE = copy.deepcopy(DEVICE_R1) - WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = 'wrong-device-uuid' + WRONG_DEVICE_UUID = '3f03c76d-31fb-47f5-9c1d-bc6b6bfa2d08' + WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID context_client_grpc.SetDevice(Device(**WRONG_DEVICE)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.device_endpoints[0].device_id.device_uuid.uuid(wrong-device-uuid) is invalid; '\ - 'should be == request.device_id.device_uuid.uuid({:s})'.format(DEVICE_R1_UUID) + msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ + 'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID) assert e.value.details() == msg - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) assert response.device_uuid.uuid == DEVICE_R1_UUID # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, DeviceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, DeviceEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID # ----- Update the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) assert response.device_uuid.uuid == DEVICE_R1_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, DeviceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, DeviceEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 40 + assert len(db_entries) == 36 # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) @@ -513,11 +512,11 @@ def test_grpc_device( assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, TopologyEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check relation was created --------------------------------------------------------------------------------- response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) @@ -528,12 +527,12 @@ def test_grpc_device( assert len(response.link_ids) == 0 # ----- Dump state of database after creating the object relation -------------------------------------------------- - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 40 + assert len(db_entries) == 33 # ----- Remove the object ------------------------------------------------------------------------------------------ context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) @@ -541,33 +540,33 @@ def test_grpc_device( context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=3) + # events = events_collector.get_events(block=True, count=3) - assert isinstance(events[0], DeviceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + # assert isinstance(events[0], DeviceEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert isinstance(events[2], ContextEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert isinstance(events[2], ContextEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + # events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + # for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 - + """ def test_grpc_link( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name -- GitLab From cf2a36a27774250be9cadbf3eef57dbc8e8af1e2 Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Fri, 2 Dec 2022 15:53:02 +0100 Subject: [PATCH 007/158] Update scalability --- src/context/service/Database.py | 17 +- src/context/service/database/ConfigModel.py | 6 +- src/context/service/database/ContextModel.py | 3 +- src/context/service/database/DeviceModel.py | 42 ++- src/context/service/database/EndPointModel.py | 28 +- .../grpc_server/ContextServiceServicerImpl.py | 249 ++++++++++-------- 6 files changed, 201 insertions(+), 144 deletions(-) diff --git a/src/context/service/Database.py b/src/context/service/Database.py index 8fae9f652..bf970b356 100644 --- a/src/context/service/Database.py +++ b/src/context/service/Database.py @@ -1,7 +1,7 @@ from typing import Tuple, List from sqlalchemy import MetaData -from sqlalchemy.orm import Session +from sqlalchemy.orm import Session, joinedload from context.service.database.Base import Base import logging from common.orm.backend.Tools import key_to_str @@ -27,8 +27,11 @@ class Database(Session): def create_or_update(self, model): with self.session() as session: att = getattr(model, model.main_pk_name()) + obj = self.get_object(type(model), att) + filt = {model.main_pk_name(): att} - found = session.query(type(model)).filter_by(**filt).one_or_none() + t_model = type(model) + found = session.query(t_model).filter_by(**filt).one_or_none() if found: found = True else: @@ -36,6 +39,9 @@ class Database(Session): session.merge(model) session.commit() + + obj = self.get_object(t_model, att) + return model, found def create(self, model): @@ -93,11 +99,11 @@ class Database(Session): raise NotFoundException(model_class.__name__.replace('Model', ''), main_key) return get - def get_or_create(self, model_class: Base, key_parts: List[str] - ) -> Tuple[Base, bool]: + def get_or_create(self, model_class: Base, key_parts: List[str], filt=None) -> Tuple[Base, bool]: str_key = key_to_str(key_parts) - filt = {model_class.main_pk_name(): key_parts} + if not filt: + filt = {model_class.main_pk_name(): key_parts} with self.session() as session: get = session.query(model_class).filter_by(**filt).one_or_none() if get: @@ -105,7 +111,6 @@ class Database(Session): else: obj = model_class() setattr(obj, model_class.main_pk_name(), str_key) - LOGGER.info(obj.dump()) session.add(obj) session.commit() return obj, True diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py index 4dcd50c2c..40069185f 100644 --- a/src/context/service/database/ConfigModel.py +++ b/src/context/service/database/ConfigModel.py @@ -40,7 +40,7 @@ class ConfigModel(Base): # pylint: disable=abstract-method config_uuid = Column(UUID(as_uuid=False), primary_key=True) # Relationships - config_rule = relationship("ConfigRuleModel", back_populates="config", lazy="dynamic") + config_rule = relationship("ConfigRuleModel", back_populates="config", lazy='joined') def delete(self) -> None: @@ -48,7 +48,7 @@ class ConfigModel(Base): # pylint: disable=abstract-method for pk,_ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() super().delete() - def dump(self): # -> List[Dict]: + def dump(self) -> List[Dict]: config_rules = [] for a in self.config_rule: asdf = a.dump() @@ -62,7 +62,7 @@ class ConfigModel(Base): # pylint: disable=abstract-method class ConfigRuleModel(Base): # pylint: disable=abstract-method __tablename__ = 'ConfigRule' config_rule_uuid = Column(UUID(as_uuid=False), primary_key=True) - config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid"), primary_key=True) + config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid", ondelete='CASCADE'), primary_key=True) action = Column(Enum(ORM_ConfigActionEnum, create_constraint=True, native_enum=True), nullable=False) position = Column(INTEGER, nullable=False) diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index ef1d485be..cde774fe4 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -33,7 +33,8 @@ class ContextModel(Base): def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} - def main_pk_name(self): + @staticmethod + def main_pk_name(): return 'context_uuid' """ diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py index bf8f73c79..122da50af 100644 --- a/src/context/service/database/DeviceModel.py +++ b/src/context/service/database/DeviceModel.py @@ -49,14 +49,16 @@ class DeviceModel(Base): __tablename__ = 'Device' device_uuid = Column(UUID(as_uuid=False), primary_key=True) device_type = Column(String) - device_config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid")) + device_config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid", ondelete='CASCADE')) device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum, create_constraint=False, native_enum=False)) # Relationships - device_config = relationship("ConfigModel", lazy="joined") - driver = relationship("DriverModel", lazy="joined") - endpoints = relationship("EndPointModel", lazy="joined") + device_config = relationship("ConfigModel", passive_deletes="all, delete", lazy="joined") + driver = relationship("DriverModel", passive_deletes=True, back_populates="device") + endpoints = relationship("EndPointModel", passive_deletes=True, back_populates="device") + + # topology = relationship("TopologyModel", lazy="joined") # def delete(self) -> None: # # pylint: disable=import-outside-toplevel @@ -83,13 +85,25 @@ class DeviceModel(Base): return self.device_config.dump() def dump_drivers(self) -> List[int]: - return self.driver.dump() + response = [] + + for a in self.driver: + LOGGER.info('DUMPPPPPPPPPPPPPPPPPPPPPIIIIIIIIIIIIIIIIIIIIIIINNNNNNNNNNNNNNNGGGGGGGGGGGGGGGGGGg') + LOGGER.info('aasdfadsf: {}'.format(a.dump())) + response.append(a.dump()) + + return response def dump_endpoints(self) -> List[Dict]: - return self.endpoints.dump() + response = [] + + for a in self.endpoints: + response.append(a.dump()) + + return response def dump( # pylint: disable=arguments-differ - self, include_config_rules=True, include_drivers=False, include_endpoints=False + self, include_config_rules=True, include_drivers=True, include_endpoints=True ) -> Dict: result = { 'device_id': self.dump_id(), @@ -101,24 +115,26 @@ class DeviceModel(Base): if include_endpoints: result['device_endpoints'] = self.dump_endpoints() return result - def main_pk_name(self): + @staticmethod + def main_pk_name(): return 'device_uuid' class DriverModel(Base): # pylint: disable=abstract-method __tablename__ = 'Driver' - driver_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid"), primary_key=True) + # driver_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid", ondelete='CASCADE'), primary_key=True) driver = Column(Enum(ORM_DeviceDriverEnum, create_constraint=False, native_enum=False)) # Relationships - device = relationship("DeviceModel") + device = relationship("DeviceModel", back_populates="driver") def dump(self) -> Dict: return self.driver.value - def main_pk_name(self): - return 'driver_uuid' + @staticmethod + def main_pk_name(): + return 'device_uuid' def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers): db_device_pk = db_device.device_uuid diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py index 669b590e3..a4381a2e3 100644 --- a/src/context/service/database/EndPointModel.py +++ b/src/context/service/database/EndPointModel.py @@ -27,14 +27,17 @@ LOGGER = logging.getLogger(__name__) class EndPointModel(Base): __tablename__ = 'EndPoint' - endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid"), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid"), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid", ondelete='CASCADE'), primary_key=True) + endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) endpoint_type = Column(String) # Relationships + kpi_sample_types = relationship("KpiSampleTypeModel", passive_deletes=True, back_populates="EndPoint") + device = relationship("DeviceModel", back_populates="endpoints") - def main_pk_name(self): + @staticmethod + def main_pk_name(): return 'endpoint_uuid' def delete(self) -> None: @@ -44,32 +47,41 @@ class EndPointModel(Base): def dump_id(self) -> Dict: result = { - 'device_uuid': self.device_uuid, + 'device_id': self.device.dump_id(), 'endpoint_uuid': {'uuid': self.endpoint_uuid}, } return result def dump_kpi_sample_types(self) -> List[int]: - db_kpi_sample_type_pks = self.references(KpiSampleTypeModel) - return [KpiSampleTypeModel(self.database, pk).dump() for pk,_ in db_kpi_sample_type_pks] + # db_kpi_sample_type_pks = self.references(KpiSampleTypeModel) + # return [KpiSampleTypeModel(self.database, pk).dump() for pk,_ in db_kpi_sample_type_pks] + response = [] + for a in self.kpi_sample_types: + response.append(a.dump()) + return response def dump( # pylint: disable=arguments-differ self, include_kpi_sample_types=True ) -> Dict: result = { - 'endpoint_uuid': self.dump_id(), + 'endpoint_id': self.dump_id(), 'endpoint_type': self.endpoint_type, } if include_kpi_sample_types: result['kpi_sample_types'] = self.dump_kpi_sample_types() return result + class KpiSampleTypeModel(Base): # pylint: disable=abstract-method __tablename__ = 'KpiSampleType' kpi_uuid = Column(UUID(as_uuid=False), primary_key=True) - endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) + endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid", ondelete='CASCADE')) kpi_sample_type = Column(Enum(ORM_KpiSampleTypeEnum, create_constraint=False, native_enum=False)) # __table_args__ = (ForeignKeyConstraint([endpoint_uuid], [EndPointModel.endpoint_uuid]), {}) + + # Relationships + EndPoint = relationship("EndPointModel", passive_deletes=True, back_populates="kpi_sample_types") + def dump(self) -> Dict: return self.kpi_sample_type.value diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index d104d5567..108ab9950 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -46,7 +46,6 @@ from context.service.database.ConnectionModel import ConnectionModel, set_path from context.service.database.ConstraintModel import set_constraints from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types from context.service.database.Events import notify_event -from context.service.database.LinkModel import LinkModel from context.service.database.RelationModels import ( ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel) @@ -60,6 +59,7 @@ from context.service.database.TopologyModel import TopologyModel from context.service.database.Events import notify_event from context.service.database.EndPointModel import EndPointModel from context.service.database.EndPointModel import KpiSampleTypeModel +from context.service.database.LinkModel import LinkModel from .Constants import ( CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, @@ -268,7 +268,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList: with self.session() as session: result = session.query(DeviceModel).all() - return DeviceList(devices=[device.dump_id() for device in result]) + return DeviceList(devices=[device.dump() for device in result]) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device: @@ -278,72 +278,76 @@ class ContextServiceServicerImpl(ContextServiceServicer): if not result: raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) - rd = result.dump() + rd = result.dump(include_config_rules=True, include_drivers=True, include_endpoints=True) + rt = Device(**rd) return rt @safe_and_metered_rpc_method(METRICS, LOGGER) def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId: - device_uuid = request.device_id.device_uuid.uuid + with self.session() as session: + device_uuid = request.device_id.device_uuid.uuid - for i,endpoint in enumerate(request.device_endpoints): - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid - if device_uuid != endpoint_device_uuid: - raise InvalidArgumentException( - 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, - ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) - - config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) - running_config_result = self.update_config(device_uuid, 'running', config_rules) - db_running_config = running_config_result[0][0] - config_uuid = db_running_config.config_uuid - - new_obj = DeviceModel(**{ - 'device_uuid' : device_uuid, - 'device_type' : request.device_type, - 'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status), - 'device_config_uuid' : config_uuid, - }) - result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj) - db_device, updated = result + for i,endpoint in enumerate(request.device_endpoints): + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + if device_uuid != endpoint_device_uuid: + raise InvalidArgumentException( + 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, + ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) - self.set_drivers(db_device, request.device_drivers) + config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) + running_config_result = self.update_config(session, device_uuid, 'running', config_rules) + db_running_config = running_config_result[0][0] + config_uuid = db_running_config.config_uuid - for i,endpoint in enumerate(request.device_endpoints): - endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + new_obj = DeviceModel(**{ + 'device_uuid' : device_uuid, + 'device_type' : request.device_type, + 'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status), + 'device_config_uuid' : config_uuid, + }) + result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj) + db_device, updated = result - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) - endpoint_attributes = { - 'device_uuid' : db_device.device_uuid, - 'endpoint_uuid': endpoint_uuid, - 'endpoint_type': endpoint.endpoint_type, - } + self.set_drivers(db_device, request.device_drivers) - endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) + for i,endpoint in enumerate(request.device_endpoints): + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + + str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) + endpoint_attributes = { + 'device_uuid' : db_device.device_uuid, + 'endpoint_uuid': endpoint_uuid, + 'endpoint_type': endpoint.endpoint_type, + } + + endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid + endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid + if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: + str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - db_topology : TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) + db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) + new_topo = TopologyModel(context_uuid=db_topology.context_uuid, topology_uuid=db_topology.topology_uuid, device_uuids=db_device.device_uuid) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - endpoint_attributes['topology_uuid'] = db_topology.topology_uuid + self.database.create_or_update(new_topo) - new_endpoint = EndPointModel(**endpoint_attributes) - result : Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) - db_endpoint, updated = result + endpoint_attributes['topology_uuid'] = db_topology.topology_uuid - self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types) + new_endpoint = EndPointModel(**endpoint_attributes) + result : Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) + db_endpoint, updated = result - # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_device_id = db_device.dump_id() - # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) + self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types) - return DeviceId(**dict_device_id) + # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_device_id = db_device.dump_id() + # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) + + return DeviceId(**dict_device_id) def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types): db_endpoint_pk = db_endpoint.endpoint_uuid @@ -362,7 +366,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): orm_driver = grpc_to_enum__device_driver(driver) str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) driver_config = { - "driver_uuid": str(uuid.uuid4()), + # "driver_uuid": str(uuid.uuid4()), "device_uuid": db_device_pk, "driver": orm_driver.name } @@ -373,13 +377,19 @@ class ContextServiceServicerImpl(ContextServiceServicer): self.database.create_or_update(db_device_driver) def update_config( - self, db_parent_pk: str, config_name: str, + self, session, db_parent_pk: str, config_name: str, raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]] ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: - str_config_key = key_to_str([db_parent_pk, config_name], separator=':') - result = self.database.get_or_create(ConfigModel, db_parent_pk) - db_config, created = result + created = False + + db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none() + if not db_config: + db_config = ConfigModel() + setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk) + session.add(db_config) + session.commit() + created = True LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump())) @@ -452,15 +462,16 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty: - with self.lock: - device_uuid = request.device_uuid.uuid - db_device = DeviceModel(self.database, device_uuid, auto_load=False) - found = db_device.load() - if not found: return Empty() + device_uuid = request.device_uuid.uuid - dict_device_id = db_device.dump_id() - db_device.delete() + with self.session() as session: + result = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() + if not result: + return Empty() + dict_device_id = result.dump_id() + session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() + session.commit() event_type = EventTypeEnum.EVENTTYPE_REMOVE notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) return Empty() @@ -472,75 +483,86 @@ class ContextServiceServicerImpl(ContextServiceServicer): - """ # ----- Link ------------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList: - with self.lock: - db_links : List[LinkModel] = get_all_objects(self.database, LinkModel) - db_links = sorted(db_links, key=operator.attrgetter('pk')) - return LinkIdList(link_ids=[db_link.dump_id() for db_link in db_links]) + with self.session() as session: + result = session.query(LinkModel).all() + return LinkIdList(link_ids=[db_link.dump_id() for db_link in result]) + @safe_and_metered_rpc_method(METRICS, LOGGER) def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList: - with self.lock: - db_links : List[LinkModel] = get_all_objects(self.database, LinkModel) - db_links = sorted(db_links, key=operator.attrgetter('pk')) - return LinkList(links=[db_link.dump() for db_link in db_links]) + with self.session() as session: + result = session.query(DeviceModel).all() + return LinkList(links=[db_link.dump() for db_link in result]) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link: - with self.lock: - link_uuid = request.link_uuid.uuid - db_link : LinkModel = get_object(self.database, LinkModel, link_uuid) - return Link(**db_link.dump()) + link_uuid = request.link_uuid.uuid + with self.session() as session: + result = session.query(LinkModel).filter(LinkModel.device_uuid == link_uuid).one_or_none() + if not result: + raise NotFoundException(DeviceModel.__name__.replace('Model', ''), link_uuid) - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId: - with self.lock: - link_uuid = request.link_id.link_uuid.uuid - result : Tuple[LinkModel, bool] = update_or_create_object( - self.database, LinkModel, link_uuid, {'link_uuid': link_uuid}) - db_link, updated = result + rd = result.dump() - for endpoint_id in request.link_endpoint_ids: - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + rt = Link(**rd) - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) + return rt - db_topology = None - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - db_topology : TopologyModel = get_object(self.database, TopologyModel, str_topology_key) - str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--') - # check device is in topology - get_object(self.database, TopologyDeviceModel, str_topology_device_key) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) - str_link_endpoint_key = key_to_str([link_uuid, endpoint_device_uuid], separator='--') - result : Tuple[LinkEndPointModel, bool] = get_or_create_object( - self.database, LinkEndPointModel, str_link_endpoint_key, { - 'link_fk': db_link, 'endpoint_fk': db_endpoint}) - #db_link_endpoint, link_endpoint_created = result + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId: + link_uuid = request.link_id.link_uuid.uuid - if db_topology is not None: - str_topology_link_key = key_to_str([str_topology_key, link_uuid], separator='--') - result : Tuple[TopologyLinkModel, bool] = get_or_create_object( - self.database, TopologyLinkModel, str_topology_link_key, { - 'topology_fk': db_topology, 'link_fk': db_link}) - #db_topology_link, topology_link_created = result + new_link = LinkModel(**{ + 'lin_uuid': link_uuid + }) + result: Tuple[LinkModel, bool] = self.database.create_or_update(new_link) + db_link, updated = result - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_link_id = db_link.dump_id() - notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) - return LinkId(**dict_link_id) + for endpoint_id in request.link_endpoint_ids: + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + + str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) + + if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: + str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) + # db_topology : TopologyModel = get_object(self.database, TopologyModel, str_topology_key) + db_topology : TopologyModel = self.database.get_object(TopologyModel, str_topology_key) + str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--') + # check device is in topology + # get_object(self.database, TopologyDeviceModel, str_topology_device_key) + # str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') + + # db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) + LOGGER.info('str_endpoint_key: {}'.format(str_endpoint_key)) + db_endpoint: EndPointModel = self.database.get_object(EndPointModel, str_endpoint_key) + + # str_link_endpoint_key = key_to_str([link_uuid, endpoint_device_uuid], separator='--') + # result : Tuple[LinkEndPointModel, bool] = get_or_create_object( + # self.database, LinkEndPointModel, str_link_endpoint_key, { + # 'link_fk': db_link, 'endpoint_fk': db_endpoint}) + #db_link_endpoint, link_endpoint_created = result + + # if db_topology is not None: + # str_topology_link_key = key_to_str([str_topology_key, link_uuid], separator='--') + # result : Tuple[TopologyLinkModel, bool] = get_or_create_object( + # self.database, TopologyLinkModel, str_topology_link_key, { + # 'topology_fk': db_topology, 'link_fk': db_link}) + # #db_topology_link, topology_link_created = result + + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_link_id = db_link.dump_id() + notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) + return LinkId(**dict_link_id) @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty: @@ -562,6 +584,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): yield LinkEvent(**json.loads(message.content)) + """ # ----- Service ---------------------------------------------------------------------------------------------------- -- GitLab From c47c372f107c6032da2ee1f01776481393704370 Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Sun, 11 Dec 2022 15:47:46 +0100 Subject: [PATCH 008/158] Update scalability --- src/context/service/Database.py | 50 +++-- src/context/service/database/ConfigModel.py | 10 +- src/context/service/database/DeviceModel.py | 25 +-- src/context/service/database/EndPointModel.py | 2 +- src/context/service/database/LinkModel.py | 54 +++--- .../service/database/RelationModels.py | 103 ++++++----- src/context/service/database/ServiceModel.py | 28 ++- src/context/service/database/TopologyModel.py | 21 +-- .../grpc_server/ContextServiceServicerImpl.py | 171 ++++++++++++------ src/context/tests/Objects.py | 35 ++-- 10 files changed, 271 insertions(+), 228 deletions(-) diff --git a/src/context/service/Database.py b/src/context/service/Database.py index bf970b356..2b699203a 100644 --- a/src/context/service/Database.py +++ b/src/context/service/Database.py @@ -16,6 +16,9 @@ class Database(Session): super().__init__() self.session = session + def get_session(self): + return self.session + def get_all(self, model): result = [] with self.session() as session: @@ -27,22 +30,21 @@ class Database(Session): def create_or_update(self, model): with self.session() as session: att = getattr(model, model.main_pk_name()) - obj = self.get_object(type(model), att) - filt = {model.main_pk_name(): att} t_model = type(model) - found = session.query(t_model).filter_by(**filt).one_or_none() - if found: + obj = session.query(t_model).filter_by(**filt).one_or_none() + + if obj: + for key in obj.__table__.columns.keys(): + setattr(obj, key, getattr(model, key)) found = True + session.commit() + return obj, found else: found = False - - session.merge(model) - session.commit() - - obj = self.get_object(t_model, att) - - return model, found + session.add(model) + session.commit() + return model, found def create(self, model): with self.session() as session: @@ -85,7 +87,6 @@ class Database(Session): for table in meta.sorted_tables: for row in engine.execute(table.select()): result.append((table.name, dict(row))) - LOGGER.info(result) return result @@ -98,10 +99,27 @@ class Database(Session): if raise_if_not_found: raise NotFoundException(model_class.__name__.replace('Model', ''), main_key) - return get + dump = None + if hasattr(get, 'dump'): + dump = get.dump() + return get, dump + + def get_object_filter(self, model_class: Base, filt, raise_if_not_found=False): + with self.session() as session: + get = session.query(model_class).filter_by(**filt).all() + + if not get: + if raise_if_not_found: + raise NotFoundException(model_class.__name__.replace('Model', '')) + else: + return None, None + + if isinstance(get, list): + return get, [obj.dump() for obj in get] + + return get, get.dump() - def get_or_create(self, model_class: Base, key_parts: List[str], filt=None) -> Tuple[Base, bool]: - str_key = key_to_str(key_parts) + def get_or_create(self, model_class: Base, key_parts: str, filt=None) -> Tuple[Base, bool]: if not filt: filt = {model_class.main_pk_name(): key_parts} with self.session() as session: @@ -110,7 +128,7 @@ class Database(Session): return get, False else: obj = model_class() - setattr(obj, model_class.main_pk_name(), str_key) + setattr(obj, model_class.main_pk_name(), key_parts) session.add(obj) session.commit() return obj, True diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py index 40069185f..2ec22985c 100644 --- a/src/context/service/database/ConfigModel.py +++ b/src/context/service/database/ConfigModel.py @@ -40,13 +40,7 @@ class ConfigModel(Base): # pylint: disable=abstract-method config_uuid = Column(UUID(as_uuid=False), primary_key=True) # Relationships - config_rule = relationship("ConfigRuleModel", back_populates="config", lazy='joined') - - - def delete(self) -> None: - db_config_rule_pks = self.references(ConfigRuleModel) - for pk,_ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() - super().delete() + config_rule = relationship("ConfigRuleModel", cascade="all,delete", back_populates="config", lazy='joined') def dump(self) -> List[Dict]: config_rules = [] @@ -75,7 +69,7 @@ class ConfigRuleModel(Base): # pylint: disable=abstract-method ) # Relationships - config = relationship("ConfigModel", back_populates="config_rule") + config = relationship("ConfigModel", passive_deletes=True, back_populates="config_rule") def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ result = { diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py index 122da50af..b7e7efed4 100644 --- a/src/context/service/database/DeviceModel.py +++ b/src/context/service/database/DeviceModel.py @@ -54,30 +54,10 @@ class DeviceModel(Base): native_enum=False)) # Relationships - device_config = relationship("ConfigModel", passive_deletes="all, delete", lazy="joined") + device_config = relationship("ConfigModel", passive_deletes=True, lazy="joined") driver = relationship("DriverModel", passive_deletes=True, back_populates="device") endpoints = relationship("EndPointModel", passive_deletes=True, back_populates="device") - # topology = relationship("TopologyModel", lazy="joined") - - # def delete(self) -> None: - # # pylint: disable=import-outside-toplevel - # from .EndPointModel import EndPointModel - # from .RelationModels import TopologyDeviceModel - # - # for db_endpoint_pk,_ in self.references(EndPointModel): - # EndPointModel(self.database, db_endpoint_pk).delete() - # - # for db_topology_device_pk,_ in self.references(TopologyDeviceModel): - # TopologyDeviceModel(self.database, db_topology_device_pk).delete() - # - # for db_driver_pk,_ in self.references(DriverModel): - # DriverModel(self.database, db_driver_pk).delete() - # - # super().delete() - # - # ConfigModel(self.database, self.device_config_fk).delete() - def dump_id(self) -> Dict: return {'device_uuid': {'uuid': self.device_uuid}} @@ -86,10 +66,7 @@ class DeviceModel(Base): def dump_drivers(self) -> List[int]: response = [] - for a in self.driver: - LOGGER.info('DUMPPPPPPPPPPPPPPPPPPPPPIIIIIIIIIIIIIIIIIIIIIIINNNNNNNNNNNNNNNGGGGGGGGGGGGGGGGGGg') - LOGGER.info('aasdfadsf: {}'.format(a.dump())) response.append(a.dump()) return response diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py index a4381a2e3..fb2c9d26a 100644 --- a/src/context/service/database/EndPointModel.py +++ b/src/context/service/database/EndPointModel.py @@ -20,7 +20,7 @@ from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import EndPointId from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type from sqlalchemy import Column, ForeignKey, String, Enum, ForeignKeyConstraint -from sqlalchemy.dialects.postgresql import UUID, ARRAY +from sqlalchemy.dialects.postgresql import UUID from context.service.database.Base import Base from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/LinkModel.py b/src/context/service/database/LinkModel.py index 8f1d971c3..025709dfd 100644 --- a/src/context/service/database/LinkModel.py +++ b/src/context/service/database/LinkModel.py @@ -14,39 +14,39 @@ import logging, operator from typing import Dict, List -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from common.orm.HighLevel import get_related_objects +from sqlalchemy import Column, ForeignKey +from sqlalchemy.dialects.postgresql import UUID +from context.service.database.Base import Base +from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) -class LinkModel(Model): - pk = PrimaryKeyField() - link_uuid = StringField(required=True, allow_empty=False) +class LinkModel(Base): + __tablename__ = 'Link' + link_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - def delete(self) -> None: - #pylint: disable=import-outside-toplevel - from .RelationModels import LinkEndPointModel, TopologyLinkModel - - for db_link_endpoint_pk,_ in self.references(LinkEndPointModel): - LinkEndPointModel(self.database, db_link_endpoint_pk).delete() - - for db_topology_link_pk,_ in self.references(TopologyLinkModel): - TopologyLinkModel(self.database, db_topology_link_pk).delete() - - super().delete() + @staticmethod + def main_pk_name(): + return 'link_uuid' def dump_id(self) -> Dict: return {'link_uuid': {'uuid': self.link_uuid}} def dump_endpoint_ids(self) -> List[Dict]: - from .RelationModels import LinkEndPointModel # pylint: disable=import-outside-toplevel - db_endpoints = get_related_objects(self, LinkEndPointModel, 'endpoint_fk') - return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))] - - def dump(self) -> Dict: - return { - 'link_id': self.dump_id(), - 'link_endpoint_ids': self.dump_endpoint_ids(), - } + return [endpoint.dump_id() for endpoint in self.endpoints] + + def dump(self, endpoints=None) -> Dict: + result = { + 'link_id': self.dump_id() + } + if endpoints: + result['link_endpoint_ids'] = [] + for endpoint in endpoints: + dump = endpoint.dump_id() + LOGGER.info(dump) + result['link_endpoint_ids'].append(dump) + + LOGGER.info(result['link_endpoint_ids']) + + LOGGER.info(result) + return result diff --git a/src/context/service/database/RelationModels.py b/src/context/service/database/RelationModels.py index 98b077a77..e69feadc4 100644 --- a/src/context/service/database/RelationModels.py +++ b/src/context/service/database/RelationModels.py @@ -13,55 +13,68 @@ # limitations under the License. import logging -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.model.Model import Model -from .ConnectionModel import ConnectionModel -from .DeviceModel import DeviceModel -from .EndPointModel import EndPointModel -from .LinkModel import LinkModel -from .ServiceModel import ServiceModel -from .SliceModel import SliceModel -from .TopologyModel import TopologyModel +from sqlalchemy import Column, ForeignKey +from sqlalchemy.dialects.postgresql import UUID +from context.service.database.Base import Base LOGGER = logging.getLogger(__name__) +# +# class ConnectionSubServiceModel(Model): # pylint: disable=abstract-method +# pk = PrimaryKeyField() +# connection_fk = ForeignKeyField(ConnectionModel) +# sub_service_fk = ForeignKeyField(ServiceModel) +# +class LinkEndPointModel(Base): # pylint: disable=abstract-method + __tablename__ = 'LinkEndPoint' + # uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) + endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"), primary_key=True) -class ConnectionSubServiceModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - connection_fk = ForeignKeyField(ConnectionModel) - sub_service_fk = ForeignKeyField(ServiceModel) - -class LinkEndPointModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - link_fk = ForeignKeyField(LinkModel) - endpoint_fk = ForeignKeyField(EndPointModel) - -class ServiceEndPointModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - service_fk = ForeignKeyField(ServiceModel) - endpoint_fk = ForeignKeyField(EndPointModel) - -class SliceEndPointModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - slice_fk = ForeignKeyField(SliceModel) - endpoint_fk = ForeignKeyField(EndPointModel) + @staticmethod + def main_pk_name(): + return 'endpoint_uuid' -class SliceServiceModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - slice_fk = ForeignKeyField(SliceModel) - service_fk = ForeignKeyField(ServiceModel) +# +# class ServiceEndPointModel(Model): # pylint: disable=abstract-method +# pk = PrimaryKeyField() +# service_fk = ForeignKeyField(ServiceModel) +# endpoint_fk = ForeignKeyField(EndPointModel) +# +# class SliceEndPointModel(Model): # pylint: disable=abstract-method +# pk = PrimaryKeyField() +# slice_fk = ForeignKeyField(SliceModel) +# endpoint_fk = ForeignKeyField(EndPointModel) +# +# class SliceServiceModel(Model): # pylint: disable=abstract-method +# pk = PrimaryKeyField() +# slice_fk = ForeignKeyField(SliceModel) +# service_fk = ForeignKeyField(ServiceMo# pylint: disable=abstract-method +# __tablename__ = 'LinkEndPoint' +# uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) +# endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) +#del) +# +# class SliceSubSliceModel(Model): # pylint: disable=abstract-method +# pk = PrimaryKeyField() +# slice_fk = ForeignKeyField(SliceModel) +# sub_slice_fk = ForeignKeyField(SliceModel) -class SliceSubSliceModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - slice_fk = ForeignKeyField(SliceModel) - sub_slice_fk = ForeignKeyField(SliceModel) +class TopologyDeviceModel(Base): # pylint: disable=abstract-method + __tablename__ = 'TopologyDevice' + # uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid")) + device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid"), primary_key=True) -class TopologyDeviceModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - topology_fk = ForeignKeyField(TopologyModel) - device_fk = ForeignKeyField(DeviceModel) + @staticmethod + def main_pk_name(): + return 'device_uuid' +# +class TopologyLinkModel(Base): # pylint: disable=abstract-method + __tablename__ = 'TopologyLink' + topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid")) + link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid"), primary_key=True) -class TopologyLinkModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - topology_fk = ForeignKeyField(TopologyModel) - link_fk = ForeignKeyField(LinkModel) + @staticmethod + def main_pk_name(): + return 'link_uuid' \ No newline at end of file diff --git a/src/context/service/database/ServiceModel.py b/src/context/service/database/ServiceModel.py index 8b32d1cc9..a5223d615 100644 --- a/src/context/service/database/ServiceModel.py +++ b/src/context/service/database/ServiceModel.py @@ -13,20 +13,17 @@ # limitations under the License. import functools, logging, operator -from enum import Enum +from sqlalchemy import Column, ForeignKey, String, Enum from typing import Dict, List -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model from common.orm.HighLevel import get_related_objects from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum from .ConfigModel import ConfigModel from .ConstraintModel import ConstraintsModel from .ContextModel import ContextModel from .Tools import grpc_to_enum - +from sqlalchemy import Column, ForeignKey +from sqlalchemy.dialects.postgresql import UUID +from context.service.database.Base import Base LOGGER = logging.getLogger(__name__) class ORM_ServiceTypeEnum(Enum): @@ -47,14 +44,15 @@ class ORM_ServiceStatusEnum(Enum): grpc_to_enum__service_status = functools.partial( grpc_to_enum, ServiceStatusEnum, ORM_ServiceStatusEnum) -class ServiceModel(Model): - pk = PrimaryKeyField() - context_fk = ForeignKeyField(ContextModel) - service_uuid = StringField(required=True, allow_empty=False) - service_type = EnumeratedField(ORM_ServiceTypeEnum, required=True) - service_constraints_fk = ForeignKeyField(ConstraintsModel) - service_status = EnumeratedField(ORM_ServiceStatusEnum, required=True) - service_config_fk = ForeignKeyField(ConfigModel) +class ServiceModel(Base): + __tablename__ = 'Service' + + service_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + service_type = Column(Enum(ORM_ServiceTypeEnum, create_constraint=False, native_enum=False, allow_empty=False)) + # service_constraints = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid", ondelete='SET NULL')) + # context_fk = ForeignKeyField(ContextModel) + service_status = Column(Enum(ORM_ServiceStatusEnum, create_constraint=False, native_enum=False, allow_empty=False)) + # service_config_fk = ForeignKeyField(ConfigModel) def delete(self) -> None: #pylint: disable=import-outside-toplevel diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py index 2925a27fa..063a1f511 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/TopologyModel.py @@ -26,7 +26,7 @@ class TopologyModel(Base): topology_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) # Relationships - context = relationship("ContextModel", back_populates="topology", lazy="joined") + context = relationship("ContextModel", back_populates="topology") def dump_id(self) -> Dict: context_id = self.context.dump_id() @@ -39,21 +39,12 @@ class TopologyModel(Base): def main_pk_name() -> str: return 'topology_uuid' - """def dump_device_ids(self) -> List[Dict]: - from .RelationModels import TopologyDeviceModel # pylint: disable=import-outside-toplevel - db_devices = get_related_objects(self, TopologyDeviceModel, 'device_fk') - return [db_device.dump_id() for db_device in sorted(db_devices, key=operator.attrgetter('pk'))] - - def dump_link_ids(self) -> List[Dict]: - from .RelationModels import TopologyLinkModel # pylint: disable=import-outside-toplevel - db_links = get_related_objects(self, TopologyLinkModel, 'link_fk') - return [db_link.dump_id() for db_link in sorted(db_links, key=operator.attrgetter('pk'))] - """ - def dump( # pylint: disable=arguments-differ - self, include_devices=True, include_links=True + self, devices=None, links=None ) -> Dict: result = {'topology_id': self.dump_id()} - # if include_devices: result['device_ids'] = self.dump_device_ids() - # if include_links: result['link_ids'] = self.dump_link_ids() + if devices: + result['device_ids'] = [device.dump_id() for device in devices] + if links: + result['link_ids'] = [link.dump_id() for link in links] return result diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 108ab9950..264ae3198 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -60,6 +60,7 @@ from context.service.database.Events import notify_event from context.service.database.EndPointModel import EndPointModel from context.service.database.EndPointModel import KpiSampleTypeModel from context.service.database.LinkModel import LinkModel +from context.service.database.RelationModels import (TopologyDeviceModel, TopologyLinkModel, LinkEndPointModel) from .Constants import ( CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, @@ -202,16 +203,30 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: - context_uuid = request.context_id.context_uuid.uuid topology_uuid = request.topology_uuid.uuid + result, dump = self.database.get_object(TopologyModel, topology_uuid, True) with self.session() as session: - result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).options(contains_eager(TopologyModel.context)).one_or_none() + devs = None + links = None - if not result: - raise NotFoundException(TopologyModel.__name__.replace('Model', ''), topology_uuid) + filt = {'topology_uuid': topology_uuid} + topology_devices = session.query(TopologyDeviceModel).filter_by(**filt).all() + if topology_devices: + devs = [] + for td in topology_devices: + filt = {'device_uuid': td.device_uuid} + devs.append(session.query(DeviceModel).filter_by(**filt).one()) + + filt = {'topology_uuid': topology_uuid} + topology_links = session.query(TopologyLinkModel).filter_by(**filt).all() + if topology_links: + links = [] + for tl in topology_links: + filt = {'link_uuid': tl.link_uuid} + links.append(session.query(LinkModel).filter_by(**filt).one()) - return Topology(**result.dump()) + return Topology(**result.dump(devs, links)) @safe_and_metered_rpc_method(METRICS, LOGGER) @@ -221,15 +236,30 @@ class ContextServiceServicerImpl(ContextServiceServicer): with self.session() as session: topology_add = TopologyModel(topology_uuid=topology_uuid, context_uuid=context_uuid) updated = True - result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() - if not result: + db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() + if not db_topology: updated = False session.merge(topology_add) session.commit() - result = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() + db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() + + for device_id in request.device_ids: + device_uuid = device_id.device_uuid.uuid + td = TopologyDeviceModel(topology_uuid=topology_uuid, device_uuid=device_uuid) + result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(td) + + + for link_id in request.link_ids: + link_uuid = link_id.link_uuid.uuid + db_link = session.query(LinkModel).filter( + LinkModel.link_uuid == link_uuid).one_or_none() + tl = TopologyLinkModel(topology_uuid=topology_uuid, link_uuid=link_uuid) + result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(tl) + + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_topology_id = result.dump_id() + dict_topology_id = db_topology.dump_id() notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) return TopologyId(**dict_topology_id) @@ -289,9 +319,10 @@ class ContextServiceServicerImpl(ContextServiceServicer): with self.session() as session: device_uuid = request.device_id.device_uuid.uuid - for i,endpoint in enumerate(request.device_endpoints): + for i, endpoint in enumerate(request.device_endpoints): endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + if len(endpoint_device_uuid) == 0: + endpoint_device_uuid = device_uuid if device_uuid != endpoint_device_uuid: raise InvalidArgumentException( 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, @@ -313,12 +344,12 @@ class ContextServiceServicerImpl(ContextServiceServicer): self.set_drivers(db_device, request.device_drivers) - for i,endpoint in enumerate(request.device_endpoints): + for i, endpoint in enumerate(request.device_endpoints): endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + # endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + # if len(endpoint_device_uuid) == 0: + # endpoint_device_uuid = device_uuid - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) endpoint_attributes = { 'device_uuid' : db_device.device_uuid, 'endpoint_uuid': endpoint_uuid, @@ -328,17 +359,19 @@ class ContextServiceServicerImpl(ContextServiceServicer): endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) + # str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) - new_topo = TopologyModel(context_uuid=db_topology.context_uuid, topology_uuid=db_topology.topology_uuid, device_uuids=db_device.device_uuid) + db_topology, topo_dump = self.database.get_object(TopologyModel, endpoint_topology_uuid) - self.database.create_or_update(new_topo) + topology_device = TopologyDeviceModel( + topology_uuid=endpoint_topology_uuid, + device_uuid=db_device.device_uuid) + self.database.create_or_update(topology_device) endpoint_attributes['topology_uuid'] = db_topology.topology_uuid new_endpoint = EndPointModel(**endpoint_attributes) - result : Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) + result: Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) db_endpoint, updated = result self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types) @@ -465,10 +498,15 @@ class ContextServiceServicerImpl(ContextServiceServicer): device_uuid = request.device_uuid.uuid with self.session() as session: - result = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() - if not result: + db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() + + session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() + session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() + session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() + + if not db_device: return Empty() - dict_device_id = result.dump_id() + dict_device_id = db_device.dump_id() session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() session.commit() @@ -496,19 +534,41 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList: with self.session() as session: - result = session.query(DeviceModel).all() - return LinkList(links=[db_link.dump() for db_link in result]) + link_list = LinkList() + + db_links = session.query(LinkModel).all() + + for db_link in db_links: + link_uuid = db_link.link_uuid + filt = {'link_uuid': link_uuid} + link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() + if link_endpoints: + eps = [] + for lep in link_endpoints: + filt = {'endpoint_uuid': lep.endpoint_uuid} + eps.append(session.query(EndPointModel).filter_by(**filt).one()) + link_list.links.append(Link(**db_link.dump(eps))) + + return link_list @safe_and_metered_rpc_method(METRICS, LOGGER) def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link: link_uuid = request.link_uuid.uuid with self.session() as session: - result = session.query(LinkModel).filter(LinkModel.device_uuid == link_uuid).one_or_none() + result = session.query(LinkModel).filter(LinkModel.link_uuid == link_uuid).one_or_none() if not result: - raise NotFoundException(DeviceModel.__name__.replace('Model', ''), link_uuid) + raise NotFoundException(LinkModel.__name__.replace('Model', ''), link_uuid) - rd = result.dump() + filt = {'link_uuid': link_uuid} + link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() + if link_endpoints: + eps = [] + for lep in link_endpoints: + filt = {'endpoint_uuid': lep.endpoint_uuid} + eps.append(session.query(EndPointModel).filter_by(**filt).one()) + return Link(**result.dump(eps)) + rd = result.dump() rt = Link(**rd) return rt @@ -520,7 +580,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): link_uuid = request.link_id.link_uuid.uuid new_link = LinkModel(**{ - 'lin_uuid': link_uuid + 'link_uuid': link_uuid }) result: Tuple[LinkModel, bool] = self.database.create_or_update(new_link) db_link, updated = result @@ -531,33 +591,20 @@ class ContextServiceServicerImpl(ContextServiceServicer): endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) + db_topology = None if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - # db_topology : TopologyModel = get_object(self.database, TopologyModel, str_topology_key) - db_topology : TopologyModel = self.database.get_object(TopologyModel, str_topology_key) - str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--') + db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) # check device is in topology - # get_object(self.database, TopologyDeviceModel, str_topology_device_key) - # str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - - # db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) - LOGGER.info('str_endpoint_key: {}'.format(str_endpoint_key)) - db_endpoint: EndPointModel = self.database.get_object(EndPointModel, str_endpoint_key) - - # str_link_endpoint_key = key_to_str([link_uuid, endpoint_device_uuid], separator='--') - # result : Tuple[LinkEndPointModel, bool] = get_or_create_object( - # self.database, LinkEndPointModel, str_link_endpoint_key, { - # 'link_fk': db_link, 'endpoint_fk': db_endpoint}) - #db_link_endpoint, link_endpoint_created = result - - # if db_topology is not None: - # str_topology_link_key = key_to_str([str_topology_key, link_uuid], separator='--') - # result : Tuple[TopologyLinkModel, bool] = get_or_create_object( - # self.database, TopologyLinkModel, str_topology_link_key, { - # 'topology_fk': db_topology, 'link_fk': db_link}) - # #db_topology_link, topology_link_created = result + self.database.get_object(TopologyDeviceModel, endpoint_device_uuid) + + + link_endpoint = LinkEndPointModel(link_uuid=link_uuid, endpoint_uuid=endpoint_uuid) + result: Tuple[LinkEndPointModel, bool] = self.database.create_or_update(link_endpoint) + + if db_topology is not None: + topology_link = TopologyLinkModel(topology_uuid=endpoint_topology_uuid, link_uuid=link_uuid) + result: Tuple[TopologyLinkModel, bool] = self.database.create_or_update(topology_link) event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE dict_link_id = db_link.dump_id() @@ -566,15 +613,19 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty: - with self.lock: + with self.session() as session: link_uuid = request.link_uuid.uuid - db_link = LinkModel(self.database, link_uuid, auto_load=False) - found = db_link.load() - if not found: return Empty() - dict_link_id = db_link.dump_id() - db_link.delete() + session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() + session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() + + result = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() + if not result: + return Empty() + dict_link_id = result.dump_id() + session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() + session.commit() event_type = EventTypeEnum.EVENTTYPE_REMOVE notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) return Empty() @@ -584,7 +635,6 @@ class ContextServiceServicerImpl(ContextServiceServicer): for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): yield LinkEvent(**json.loads(message.content)) - """ # ----- Service ---------------------------------------------------------------------------------------------------- @@ -693,6 +743,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): yield ServiceEvent(**json.loads(message.content)) + """ # ----- Slice ---------------------------------------------------------------------------------------------------- diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index 772da38e0..a2aebdd96 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -45,6 +45,7 @@ PACKET_PORT_SAMPLE_TYPES = [ # ----- Device --------------------------------------------------------------------------------------------------------- +EP1 = '5610e2c0-8abe-4127-80d0-7c68aff1c19e' EP2 = '7eb80584-2587-4e71-b10c-f3a5c48e84ab' EP3 = '368baf47-0540-4ab4-add8-a19b5167162c' EP100 = '6a923121-36e1-4b5e-8cd6-90aceca9b5cf' @@ -66,12 +67,12 @@ DEVICE_R1 = json_device_packetrouter_disabled( DEVICE_R1_UUID, endpoints=DEVICE_R1_EPS, config_rules=DEVICE_R1_RULES) -DEVICE_R2_UUID = 'R2' +DEVICE_R2_UUID = '2fd2be23-5b20-414c-b1ea-2f16ae6eb425' DEVICE_R2_ID = json_device_id(DEVICE_R2_UUID) DEVICE_R2_EPS = [ - json_endpoint(DEVICE_R2_ID, 'EP1', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R2_ID, 'EP3', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R2_ID, 'EP100', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R2_ID, EP1, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R2_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R2_ID, EP100, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), ] DEVICE_R2_RULES = [ json_config_rule_set('dev/rsrc1/value', 'value4'), @@ -82,12 +83,12 @@ DEVICE_R2 = json_device_packetrouter_disabled( DEVICE_R2_UUID, endpoints=DEVICE_R2_EPS, config_rules=DEVICE_R2_RULES) -DEVICE_R3_UUID = 'R3' +DEVICE_R3_UUID = '3e71a251-2218-42c5-b4b8-de7760c0d9b3' DEVICE_R3_ID = json_device_id(DEVICE_R3_UUID) DEVICE_R3_EPS = [ - json_endpoint(DEVICE_R3_ID, 'EP1', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R3_ID, 'EP2', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R3_ID, 'EP100', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R3_ID, EP2, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R3_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), + json_endpoint(DEVICE_R3_ID, EP100, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), ] DEVICE_R3_RULES = [ json_config_rule_set('dev/rsrc1/value', 'value4'), @@ -99,29 +100,29 @@ DEVICE_R3 = json_device_packetrouter_disabled( # ----- Link ----------------------------------------------------------------------------------------------------------- -LINK_R1_R2_UUID = 'R1/EP2-R2/EP1' +LINK_R1_R2_UUID = 'c8f92eec-340e-4d31-8d7e-7074927dc889' LINK_R1_R2_ID = json_link_id(LINK_R1_R2_UUID) LINK_R1_R2_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, 'EP2', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R2_ID, 'EP1', topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R1_ID, EP2, topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R2_ID, EP1, topology_id=TOPOLOGY_ID), ] LINK_R1_R2 = json_link(LINK_R1_R2_UUID, LINK_R1_R2_EPIDS) -LINK_R2_R3_UUID = 'R2/EP3-R3/EP2' +LINK_R2_R3_UUID = 'f9e3539a-d8f9-4737-b4b4-cacf7f90aa5d' LINK_R2_R3_ID = json_link_id(LINK_R2_R3_UUID) LINK_R2_R3_EPIDS = [ - json_endpoint_id(DEVICE_R2_ID, 'EP3', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, 'EP2', topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R2_ID, EP3, topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R3_ID, EP2, topology_id=TOPOLOGY_ID), ] LINK_R2_R3 = json_link(LINK_R2_R3_UUID, LINK_R2_R3_EPIDS) -LINK_R1_R3_UUID = 'R1/EP3-R3/EP1' +LINK_R1_R3_UUID = '1f1a988c-47a9-41b2-afd9-ebd6d434a0b4' LINK_R1_R3_ID = json_link_id(LINK_R1_R3_UUID) LINK_R1_R3_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, 'EP3', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, 'EP1', topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R1_ID, EP3, topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R3_ID, EP1, topology_id=TOPOLOGY_ID), ] LINK_R1_R3 = json_link(LINK_R1_R3_UUID, LINK_R1_R3_EPIDS) -- GitLab From 1b2eef22feb1ec33fda9c1b33580f7dce0a63a19 Mon Sep 17 00:00:00 2001 From: cmanso <cmanso@protonmail.com> Date: Sun, 11 Dec 2022 23:43:52 +0100 Subject: [PATCH 009/158] Update scalability --- .../service/database/ConstraintModel.py | 310 ++++++++++-------- src/context/service/database/EndPointModel.py | 54 +-- src/context/service/database/ServiceModel.py | 61 ++-- .../grpc_server/ContextServiceServicerImpl.py | 207 ++++++++---- src/context/tests/Objects.py | 10 +- src/context/tests/test_unitary.py | 279 ++++++++-------- 6 files changed, 528 insertions(+), 393 deletions(-) diff --git a/src/context/service/database/ConstraintModel.py b/src/context/service/database/ConstraintModel.py index a35ec250d..c5ed7504d 100644 --- a/src/context/service/database/ConstraintModel.py +++ b/src/context/service/database/ConstraintModel.py @@ -13,91 +13,122 @@ # limitations under the License. import logging, operator -from enum import Enum from typing import Dict, List, Optional, Tuple, Type, Union -from common.orm.Database import Database from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object from common.orm.backend.Tools import key_to_str -from common.orm.fields.BooleanField import BooleanField -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.FloatField import FloatField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.IntegerField import IntegerField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model from common.proto.context_pb2 import Constraint from common.tools.grpc.Tools import grpc_message_to_json_string -from .EndPointModel import EndPointModel, get_endpoint +from .EndPointModel import EndPointModel from .Tools import fast_hasher, remove_dict_key +from sqlalchemy import Column, ForeignKey, String, Float, CheckConstraint, Integer, Boolean, Enum +from sqlalchemy.dialects.postgresql import UUID +from context.service.database.Base import Base +import enum LOGGER = logging.getLogger(__name__) -class ConstraintsModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - def delete(self) -> None: - db_constraint_pks = self.references(ConstraintModel) - for pk,_ in db_constraint_pks: ConstraintModel(self.database, pk).delete() - super().delete() +class ConstraintsModel(Base): # pylint: disable=abstract-method + __tablename__ = 'Constraints' + constraints_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - def dump(self) -> List[Dict]: - db_constraint_pks = self.references(ConstraintModel) - constraints = [ConstraintModel(self.database, pk).dump(include_position=True) for pk,_ in db_constraint_pks] + @staticmethod + def main_pk_name(): + return 'constraints_uuid' + + + def dump(self, constraints) -> List[Dict]: constraints = sorted(constraints, key=operator.itemgetter('position')) return [remove_dict_key(constraint, 'position') for constraint in constraints] -class ConstraintCustomModel(Model): # pylint: disable=abstract-method - constraint_type = StringField(required=True, allow_empty=False) - constraint_value = StringField(required=True, allow_empty=False) + +class ConstraintCustomModel(Base): # pylint: disable=abstract-method + __tablename__ = 'ConstraintCustom' + constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + constraint_type = Column(String, nullable=False) + constraint_value = Column(String, nullable=False) + + @staticmethod + def main_pk_name(): + return 'constraint_uuid' + def dump(self) -> Dict: # pylint: disable=arguments-differ return {'custom': {'constraint_type': self.constraint_type, 'constraint_value': self.constraint_value}} + Union_ConstraintEndpoint = Union[ 'ConstraintEndpointLocationGpsPositionModel', 'ConstraintEndpointLocationRegionModel', 'ConstraintEndpointPriorityModel' ] -def dump_endpoint_id(endpoint_constraint : Union_ConstraintEndpoint): - db_endpoints_pks = list(endpoint_constraint.references(EndPointModel)) - num_endpoints = len(db_endpoints_pks) - if num_endpoints != 1: - raise Exception('Wrong number({:d}) of associated Endpoints with constraint'.format(num_endpoints)) - db_endpoint = EndPointModel(endpoint_constraint.database, db_endpoints_pks[0]) - return db_endpoint.dump_id() - -class ConstraintEndpointLocationRegionModel(Model): # pylint: disable=abstract-method - endpoint_fk = ForeignKeyField(EndPointModel) - region = StringField(required=True, allow_empty=False) - def dump(self) -> Dict: # pylint: disable=arguments-differ - return {'endpoint_location': {'endpoint_id': dump_endpoint_id(self), 'region': self.region}} -class ConstraintEndpointLocationGpsPositionModel(Model): # pylint: disable=abstract-method - endpoint_fk = ForeignKeyField(EndPointModel) - latitude = FloatField(required=True, min_value=-90.0, max_value=90.0) - longitude = FloatField(required=True, min_value=-180.0, max_value=180.0) +# def dump_endpoint_id(endpoint_constraint: Union_ConstraintEndpoint): +# db_endpoints_pks = list(endpoint_constraint.references(EndPointModel)) +# num_endpoints = len(db_endpoints_pks) +# if num_endpoints != 1: +# raise Exception('Wrong number({:d}) of associated Endpoints with constraint'.format(num_endpoints)) +# db_endpoint = EndPointModel(endpoint_constraint.database, db_endpoints_pks[0]) +# return db_endpoint.dump_id() - def dump(self) -> Dict: # pylint: disable=arguments-differ - gps_position = {'latitude': self.latitude, 'longitude': self.longitude} - return {'endpoint_location': {'endpoint_id': dump_endpoint_id(self), 'gps_position': gps_position}} -class ConstraintEndpointPriorityModel(Model): # pylint: disable=abstract-method - endpoint_fk = ForeignKeyField(EndPointModel) - priority = FloatField(required=True) +class ConstraintEndpointLocationRegionModel(Base): # pylint: disable=abstract-method + __tablename__ = 'ConstraintEndpointLocationRegion' + constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) + region = Column(String, nullable=False) + + @staticmethod + def main_pk_name(): + return 'constraint_uuid' + + def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ + return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'region': self.region}} - def dump(self) -> Dict: # pylint: disable=arguments-differ - return {'endpoint_priority': {'endpoint_id': dump_endpoint_id(self), 'priority': self.priority}} -class ConstraintSlaAvailabilityModel(Model): # pylint: disable=abstract-method - num_disjoint_paths = IntegerField(required=True, min_value=1) - all_active = BooleanField(required=True) +class ConstraintEndpointLocationGpsPositionModel(Base): # pylint: disable=abstract-method + __tablename__ = 'ConstraintEndpointLocationGpsPosition' + constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) + latitude = Column(Float, CheckConstraint('latitude > -90.0 AND latitude < 90.0'), nullable=False) + longitude = Column(Float, CheckConstraint('longitude > -90.0 AND longitude < 90.0'), nullable=False) + + def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ + gps_position = {'latitude': self.latitude, 'longitude': self.longitude} + return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'gps_position': gps_position}} + + +class ConstraintEndpointPriorityModel(Base): # pylint: disable=abstract-method + __tablename__ = 'ConstraintEndpointPriority' + constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) + # endpoint_fk = ForeignKeyField(EndPointModel) + # priority = FloatField(required=True) + priority = Column(Float, nullable=False) + @staticmethod + def main_pk_name(): + return 'constraint_uuid' + + def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ + return {'endpoint_priority': {'endpoint_id': endpoint.dump_id(), 'priority': self.priority}} + + +class ConstraintSlaAvailabilityModel(Base): # pylint: disable=abstract-method + __tablename__ = 'ConstraintSlaAvailability' + constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + # num_disjoint_paths = IntegerField(required=True, min_value=1) + num_disjoint_paths = Column(Integer, CheckConstraint('num_disjoint_paths > 1'), nullable=False) + # all_active = BooleanField(required=True) + all_active = Column(Boolean, nullable=False) + @staticmethod + def main_pk_name(): + return 'constraint_uuid' def dump(self) -> Dict: # pylint: disable=arguments-differ return {'sla_availability': {'num_disjoint_paths': self.num_disjoint_paths, 'all_active': self.all_active}} # enum values should match name of field in ConstraintModel -class ConstraintKindEnum(Enum): +class ConstraintKindEnum(enum.Enum): CUSTOM = 'custom' ENDPOINT_LOCATION_REGION = 'ep_loc_region' ENDPOINT_LOCATION_GPSPOSITION = 'ep_loc_gpspos' @@ -109,41 +140,56 @@ Union_SpecificConstraint = Union[ ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel, ] -class ConstraintModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - constraints_fk = ForeignKeyField(ConstraintsModel) - kind = EnumeratedField(ConstraintKindEnum) - position = IntegerField(min_value=0, required=True) - constraint_custom_fk = ForeignKeyField(ConstraintCustomModel, required=False) - constraint_ep_loc_region_fk = ForeignKeyField(ConstraintEndpointLocationRegionModel, required=False) - constraint_ep_loc_gpspos_fk = ForeignKeyField(ConstraintEndpointLocationGpsPositionModel, required=False) - constraint_ep_priority_fk = ForeignKeyField(ConstraintEndpointPriorityModel, required=False) - constraint_sla_avail_fk = ForeignKeyField(ConstraintSlaAvailabilityModel, required=False) - - def delete(self) -> None: - field_name = 'constraint_{:s}_fk'.format(str(self.kind.value)) - specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None) - if specific_fk_value is None: - raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name)) - specific_fk_class = getattr(ConstraintModel, field_name, None) - foreign_model_class : Model = specific_fk_class.foreign_model - super().delete() - get_object(self.database, foreign_model_class, str(specific_fk_value)).delete() +class ConstraintModel(Base): # pylint: disable=abstract-method + __tablename__ = 'Constraint' + # pk = PrimaryKeyField() + # constraints_fk = ForeignKeyField(ConstraintsModel) + constraints_uuid = Column(UUID(as_uuid=False), ForeignKey("Constraints.constraints_uuid"), primary_key=True) + # kind = EnumeratedField(ConstraintKindEnum) + kind = Column(Enum(ConstraintKindEnum, create_constraint=False, native_enum=False)) + # position = IntegerField(min_value=0, required=True) + position = Column(Integer, CheckConstraint('position >= 0'), nullable=False) + # constraint_custom_fk = ForeignKeyField(ConstraintCustomModel, required=False) + constraint_custom = Column(UUID(as_uuid=False), ForeignKey("ConstraintCustom.constraint_uuid")) + # constraint_ep_loc_region_fk = ForeignKeyField(ConstraintEndpointLocationRegionModel, required=False) + constraint_ep_loc_region = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationRegion.constraint_uuid")) + # constraint_ep_loc_gpspos_fk = ForeignKeyField(ConstraintEndpointLocationGpsPositionModel, required=False) + constraint_ep_loc_gpspos = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationGpsPosition.constraint_uuid")) + # constraint_ep_priority_fk = ForeignKeyField(ConstraintEndpointPriorityModel, required=False) + constraint_ep_priority = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointPriority.constraint_uuid"),) + # constraint_sla_avail_fk = ForeignKeyField(ConstraintSlaAvailabilityModel, required=False) + constraint_sla_avail = Column(UUID(as_uuid=False), ForeignKey("ConstraintSlaAvailability.constraint_uuid")) + + @staticmethod + def main_pk_name(): + return 'constraint_uuid' + + # def delete(self) -> None: + # field_name = 'constraint_{:s}_fk'.format(str(self.kind.value)) + # specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None) + # if specific_fk_value is None: + # raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name)) + # specific_fk_class = getattr(ConstraintModel, field_name, None) + # foreign_model_class : Model = specific_fk_class.foreign_model + # super().delete() + # get_object(self.database, foreign_model_class, str(specific_fk_value)).delete() def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ - field_name = 'constraint_{:s}_fk'.format(str(self.kind.value)) - specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None) + field_name = 'constraint_{:s}'.format(str(self.kind.value)) + specific_fk_value = getattr(self, field_name, None) if specific_fk_value is None: raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name)) specific_fk_class = getattr(ConstraintModel, field_name, None) - foreign_model_class : Model = specific_fk_class.foreign_model - constraint : Union_SpecificConstraint = get_object(self.database, foreign_model_class, str(specific_fk_value)) + foreign_model_class: Base = specific_fk_class.foreign_model + constraint: Union_SpecificConstraint = get_object(self.database, foreign_model_class, str(specific_fk_value)) result = constraint.dump() - if include_position: result['position'] = self.position + if include_position: + result['position'] = self.position return result Tuple_ConstraintSpecs = Tuple[Type, str, Dict, ConstraintKindEnum] -def parse_constraint_custom(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs: + +def parse_constraint_custom(grpc_constraint) -> Tuple_ConstraintSpecs: constraint_class = ConstraintCustomModel str_constraint_id = grpc_constraint.custom.constraint_type constraint_data = { @@ -152,11 +198,11 @@ def parse_constraint_custom(database : Database, grpc_constraint) -> Tuple_Const } return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.CUSTOM -def parse_constraint_endpoint_location(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs: +def parse_constraint_endpoint_location(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs: grpc_endpoint_id = grpc_constraint.endpoint_location.endpoint_id - str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) + # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) - str_constraint_id = str_endpoint_key + str_constraint_id = db_endpoint.endpoint_uuid constraint_data = {'endpoint_fk': db_endpoint} grpc_location = grpc_constraint.endpoint_location.location @@ -174,18 +220,18 @@ def parse_constraint_endpoint_location(database : Database, grpc_constraint) -> MSG = 'Location kind {:s} in Constraint of kind endpoint_location is not implemented: {:s}' raise NotImplementedError(MSG.format(location_kind, grpc_message_to_json_string(grpc_constraint))) -def parse_constraint_endpoint_priority(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs: +def parse_constraint_endpoint_priority(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs: grpc_endpoint_id = grpc_constraint.endpoint_priority.endpoint_id - str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) + # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) constraint_class = ConstraintEndpointPriorityModel - str_constraint_id = str_endpoint_key + str_constraint_id = db_endpoint.endpoint_uuid priority = grpc_constraint.endpoint_priority.priority constraint_data = {'endpoint_fk': db_endpoint, 'priority': priority} return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_PRIORITY -def parse_constraint_sla_availability(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs: +def parse_constraint_sla_availability(grpc_constraint) -> Tuple_ConstraintSpecs: constraint_class = ConstraintSlaAvailabilityModel str_constraint_id = '' constraint_data = { @@ -206,50 +252,50 @@ Union_ConstraintModel = Union[ ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel ] -def set_constraint( - database : Database, db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int -) -> Tuple[Union_ConstraintModel, bool]: - grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) - - parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) - if parser is None: - raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( - grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) - - # create specific constraint - constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint) - str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) - str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') - result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( - database, constraint_class, str_constraint_key, constraint_data) - db_specific_constraint, updated = result - - # create generic constraint - constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value) - constraint_data = { - 'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind, - constraint_fk_field_name: db_specific_constraint - } - result : Tuple[ConstraintModel, bool] = update_or_create_object( - database, ConstraintModel, str_constraint_key, constraint_data) - db_constraint, updated = result - - return db_constraint, updated - -def set_constraints( - database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints -) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: - - str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') - result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) - db_constraints, created = result - - db_objects = [(db_constraints, created)] - - for position,grpc_constraint in enumerate(grpc_constraints): - result : Tuple[ConstraintModel, bool] = set_constraint( - database, db_constraints, grpc_constraint, position) - db_constraint, updated = result - db_objects.append((db_constraint, updated)) - - return db_objects +# def set_constraint( +# db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int +# ) -> Tuple[Union_ConstraintModel, bool]: +# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) +# +# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) +# if parser is None: +# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( +# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) +# +# # create specific constraint +# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint) +# str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) +# str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') +# result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( +# database, constraint_class, str_constraint_key, constraint_data) +# db_specific_constraint, updated = result +# +# # create generic constraint +# constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value) +# constraint_data = { +# 'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind, +# constraint_fk_field_name: db_specific_constraint +# } +# result : Tuple[ConstraintModel, bool] = update_or_create_object( +# database, ConstraintModel, str_constraint_key, constraint_data) +# db_constraint, updated = result +# +# return db_constraint, updated +# +# def set_constraints( +# database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints +# ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: +# +# str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') +# result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) +# db_constraints, created = result +# +# db_objects = [(db_constraints, created)] +# +# for position,grpc_constraint in enumerate(grpc_constraints): +# result : Tuple[ConstraintModel, bool] = set_constraint( +# database, db_constraints, grpc_constraint, position) +# db_constraint, updated = result +# db_objects.append((db_constraint, updated)) +# +# return db_objects diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py index fb2c9d26a..540453970 100644 --- a/src/context/service/database/EndPointModel.py +++ b/src/context/service/database/EndPointModel.py @@ -99,30 +99,30 @@ def set_kpi_sample_types(database : Database, db_endpoint : EndPointModel, grpc_ db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type db_endpoint_kpi_sample_type.save() """ -def get_endpoint( - database : Database, grpc_endpoint_id : EndPointId, - validate_topology_exists : bool = True, validate_device_in_topology : bool = True -) -> Tuple[str, EndPointModel]: - endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = grpc_endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = grpc_endpoint_id.topology_id.context_id.context_uuid.uuid - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) - - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - # check topology exists - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - if validate_topology_exists: - from .TopologyModel import TopologyModel - get_object(database, TopologyModel, str_topology_key) - - # check device is in topology - str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--') - if validate_device_in_topology: - from .RelationModels import TopologyDeviceModel - get_object(database, TopologyDeviceModel, str_topology_device_key) - - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - - db_endpoint : EndPointModel = get_object(database, EndPointModel, str_endpoint_key) - return str_endpoint_key, db_endpoint +# def get_endpoint( +# database : Database, grpc_endpoint_id : EndPointId, +# validate_topology_exists : bool = True, validate_device_in_topology : bool = True +# ) -> Tuple[str, EndPointModel]: +# endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid +# endpoint_device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid +# endpoint_topology_uuid = grpc_endpoint_id.topology_id.topology_uuid.uuid +# endpoint_topology_context_uuid = grpc_endpoint_id.topology_id.context_id.context_uuid.uuid +# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) +# +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# # check topology exists +# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) +# if validate_topology_exists: +# from .TopologyModel import TopologyModel +# get_object(database, TopologyModel, str_topology_key) +# +# # check device is in topology +# str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--') +# if validate_device_in_topology: +# from .RelationModels import TopologyDeviceModel +# get_object(database, TopologyDeviceModel, str_topology_device_key) +# +# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') +# +# db_endpoint : EndPointModel = get_object(database, EndPointModel, str_endpoint_key) +# return str_endpoint_key, db_endpoint diff --git a/src/context/service/database/ServiceModel.py b/src/context/service/database/ServiceModel.py index a5223d615..8f358be52 100644 --- a/src/context/service/database/ServiceModel.py +++ b/src/context/service/database/ServiceModel.py @@ -13,7 +13,7 @@ # limitations under the License. import functools, logging, operator -from sqlalchemy import Column, ForeignKey, String, Enum +from sqlalchemy import Column, Enum, ForeignKey from typing import Dict, List from common.orm.HighLevel import get_related_objects from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum @@ -21,12 +21,12 @@ from .ConfigModel import ConfigModel from .ConstraintModel import ConstraintsModel from .ContextModel import ContextModel from .Tools import grpc_to_enum -from sqlalchemy import Column, ForeignKey from sqlalchemy.dialects.postgresql import UUID from context.service.database.Base import Base +import enum LOGGER = logging.getLogger(__name__) -class ORM_ServiceTypeEnum(Enum): +class ORM_ServiceTypeEnum(enum.Enum): UNKNOWN = ServiceTypeEnum.SERVICETYPE_UNKNOWN L3NM = ServiceTypeEnum.SERVICETYPE_L3NM L2NM = ServiceTypeEnum.SERVICETYPE_L2NM @@ -35,7 +35,7 @@ class ORM_ServiceTypeEnum(Enum): grpc_to_enum__service_type = functools.partial( grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum) -class ORM_ServiceStatusEnum(Enum): +class ORM_ServiceStatusEnum(enum.Enum): UNDEFINED = ServiceStatusEnum.SERVICESTATUS_UNDEFINED PLANNED = ServiceStatusEnum.SERVICESTATUS_PLANNED ACTIVE = ServiceStatusEnum.SERVICESTATUS_ACTIVE @@ -47,24 +47,35 @@ grpc_to_enum__service_status = functools.partial( class ServiceModel(Base): __tablename__ = 'Service' + # pk = PrimaryKeyField() + # context_fk = ForeignKeyField(ContextModel) + context_uuid = Column(UUID(as_uuid=False), ForeignKey("Context.context_uuid")) + # service_uuid = StringField(required=True, allow_empty=False) service_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + # service_type = EnumeratedField(ORM_ServiceTypeEnum, required=True) service_type = Column(Enum(ORM_ServiceTypeEnum, create_constraint=False, native_enum=False, allow_empty=False)) - # service_constraints = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid", ondelete='SET NULL')) - # context_fk = ForeignKeyField(ContextModel) + # service_constraints_fk = ForeignKeyField(ConstraintsModel) + service_constraints = Column(UUID(as_uuid=False), ForeignKey("Constraints.constraints_uuid")) + # service_status = EnumeratedField(ORM_ServiceStatusEnum, required=True) service_status = Column(Enum(ORM_ServiceStatusEnum, create_constraint=False, native_enum=False, allow_empty=False)) # service_config_fk = ForeignKeyField(ConfigModel) + service_config = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid")) - def delete(self) -> None: - #pylint: disable=import-outside-toplevel - from .RelationModels import ServiceEndPointModel - - for db_service_endpoint_pk,_ in self.references(ServiceEndPointModel): - ServiceEndPointModel(self.database, db_service_endpoint_pk).delete() + # def delete(self) -> None: + # #pylint: disable=import-outside-toplevel + # from .RelationModels import ServiceEndPointModel + # + # for db_service_endpoint_pk,_ in self.references(ServiceEndPointModel): + # ServiceEndPointModel(self.database, db_service_endpoint_pk).delete() + # + # super().delete() + # + # ConfigModel(self.database, self.service_config_fk).delete() + # ConstraintsModel(self.database, self.service_constraints_fk).delete() - super().delete() + def main_pk_name(self): + return 'context_uuid' - ConfigModel(self.database, self.service_config_fk).delete() - ConstraintsModel(self.database, self.service_constraints_fk).delete() def dump_id(self) -> Dict: context_id = ContextModel(self.database, self.context_fk).dump_id() @@ -73,10 +84,10 @@ class ServiceModel(Base): 'service_uuid': {'uuid': self.service_uuid}, } - def dump_endpoint_ids(self) -> List[Dict]: - from .RelationModels import ServiceEndPointModel # pylint: disable=import-outside-toplevel - db_endpoints = get_related_objects(self, ServiceEndPointModel, 'endpoint_fk') - return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))] + # def dump_endpoint_ids(self, endpoints) -> List[Dict]: + # from .RelationModels import ServiceEndPointModel # pylint: disable=import-outside-toplevel + # db_endpoints = get_related_objects(self, ServiceEndPointModel, 'endpoint_fk') + # return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))] def dump_constraints(self) -> List[Dict]: return ConstraintsModel(self.database, self.service_constraints_fk).dump() @@ -85,14 +96,16 @@ class ServiceModel(Base): return ConfigModel(self.database, self.service_config_fk).dump() def dump( # pylint: disable=arguments-differ - self, include_endpoint_ids=True, include_constraints=True, include_config_rules=True - ) -> Dict: + self, endpoint_ids=True, constraints=True, config_rules=True) -> Dict: result = { 'service_id': self.dump_id(), 'service_type': self.service_type.value, 'service_status': {'service_status': self.service_status.value}, } - if include_endpoint_ids: result['service_endpoint_ids'] = self.dump_endpoint_ids() - if include_constraints: result['service_constraints'] = self.dump_constraints() - if include_config_rules: result.setdefault('service_config', {})['config_rules'] = self.dump_config() + if endpoint_ids: + result['service_endpoint_ids'] = self.dump_endpoint_ids() + if constraints: + result['service_constraints'] = self.dump_constraints() + if config_rules: + result.setdefault('service_config', {})['config_rules'] = self.dump_config() return result diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 264ae3198..98c961007 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -17,6 +17,7 @@ import grpc, json, logging, operator, threading from typing import Iterator, List, Set, Tuple, Union from common.message_broker.MessageBroker import MessageBroker from context.service.Database import Database +from common.tools.grpc.Tools import grpc_message_to_json_string from common.proto.context_pb2 import ( Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, @@ -27,7 +28,7 @@ from common.proto.context_pb2 import ( Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList, Slice, SliceEvent, SliceId, SliceIdList, SliceList, Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList, - ConfigActionEnum) + ConfigActionEnum, Constraint) from common.proto.context_pb2_grpc import ContextServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException @@ -60,6 +61,8 @@ from context.service.database.Events import notify_event from context.service.database.EndPointModel import EndPointModel from context.service.database.EndPointModel import KpiSampleTypeModel from context.service.database.LinkModel import LinkModel +from context.service.database.ServiceModel import ServiceModel +from context.service.database.ConstraintModel import ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS from context.service.database.RelationModels import (TopologyDeviceModel, TopologyLinkModel, LinkEndPointModel) from .Constants import ( @@ -640,87 +643,153 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList: - with self.lock: - db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) - db_services : Set[ServiceModel] = get_related_objects(db_context, ServiceModel) - db_services = sorted(db_services, key=operator.attrgetter('pk')) + context_uuid = request.context_uuid.uuid + + with self.session() as session: + db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services]) @safe_and_metered_rpc_method(METRICS, LOGGER) def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: - with self.lock: - db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) - db_services : Set[ServiceModel] = get_related_objects(db_context, ServiceModel) - db_services = sorted(db_services, key=operator.attrgetter('pk')) - return ServiceList(services=[db_service.dump() for db_service in db_services]) + context_uuid = request.context_uuid.uuid - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service: - with self.lock: - str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) - db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) - return Service(**db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)) + with self.session() as session: + db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() + return ServiceList(services=[db_service.dump() for db_service in db_services]) - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId: - with self.lock: - context_uuid = request.service_id.context_id.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - for i,endpoint_id in enumerate(request.service_endpoint_ids): - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: - raise InvalidArgumentException( - 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), - endpoint_topology_context_uuid, - ['should be == {:s}({:s})'.format( - 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) - service_uuid = request.service_id.service_uuid.uuid - str_service_key = key_to_str([context_uuid, service_uuid]) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service: + service_uuid = request.service_uuid.uuid + with self.session() as session: + result = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none() - constraints_result = set_constraints( - self.database, str_service_key, 'constraints', request.service_constraints) - db_constraints = constraints_result[0][0] + if not result: + raise NotFoundException(ServiceModel.__name__.replace('Model', ''), service_uuid) - config_rules = grpc_config_rules_to_raw(request.service_config.config_rules) - running_config_result = update_config(self.database, str_service_key, 'running', config_rules) - db_running_config = running_config_result[0][0] + return Service(**result.dump()) - result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { - 'context_fk' : db_context, - 'service_uuid' : service_uuid, - 'service_type' : grpc_to_enum__service_type(request.service_type), - 'service_constraints_fk': db_constraints, - 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), - 'service_config_fk' : db_running_config, - }) - db_service, updated = result + def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int + ) -> Tuple[Union_ConstraintModel, bool]: + with self.session() as session: - for i,endpoint_id in enumerate(request.service_endpoint_ids): - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) + + parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) + if parser is None: + raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( + grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) + + # create specific constraint + constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) + LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) + # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) + # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') + + # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( + # database, constraint_class, str_constraint_key, constraint_data) + constraint_data[constraint_class.main_pk_name()] = str_constraint_id + db_new_constraint = constraint_class(**constraint_data) + result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) + db_specific_constraint, updated = result + + # create generic constraint + # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) + constraint_data = { + 'constraint_uuid': db_constraints.constraint_uuid, 'position': position, 'kind': constraint_kind + } - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') + db_new_constraint = ConstraintModel(**constraint_data) + result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) + db_constraint, updated = result - db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) + return db_constraint, updated - str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') - result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( - self.database, ServiceEndPointModel, str_service_endpoint_key, { - 'service_fk': db_service, 'endpoint_fk': db_endpoint}) - #db_service_endpoint, service_endpoint_created = result + def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints + ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: + with self.session() as session: + # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') + # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) + result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() + created = None + if result: + created = True + session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() + db_constraints = ConstraintsModel(constraints_uuid=service_uuid) + session.add(db_constraints) + + db_objects = [(db_constraints, created)] + + for position,grpc_constraint in enumerate(grpc_constraints): + result : Tuple[ConstraintModel, bool] = self.set_constraint( + db_constraints, grpc_constraint, position) + db_constraint, updated = result + db_objects.append((db_constraint, updated)) + + return db_objects - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_service_id = db_service.dump_id() - notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) - return ServiceId(**dict_service_id) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId: + with self.lock: + with self.session() as session: + + context_uuid = request.service_id.context_id.context_uuid.uuid + # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) + db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() + + for i,endpoint_id in enumerate(request.service_endpoint_ids): + endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: + raise InvalidArgumentException( + 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), + endpoint_topology_context_uuid, + ['should be == {:s}({:s})'.format( + 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) + + service_uuid = request.service_id.service_uuid.uuid + # str_service_key = key_to_str([context_uuid, service_uuid]) + + constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints) + db_constraints = constraints_result[0][0] + + config_rules = grpc_config_rules_to_raw(request.service_config.config_rules) + running_config_result = update_config(self.database, str_service_key, 'running', config_rules) + db_running_config = running_config_result[0][0] + + result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { + 'context_fk' : db_context, + 'service_uuid' : service_uuid, + 'service_type' : grpc_to_enum__service_type(request.service_type), + 'service_constraints_fk': db_constraints, + 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), + 'service_config_fk' : db_running_config, + }) + db_service, updated = result + + for i,endpoint_id in enumerate(request.service_endpoint_ids): + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + + str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) + if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: + str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) + str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') + + db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) + + str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') + result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( + self.database, ServiceEndPointModel, str_service_endpoint_key, { + 'service_fk': db_service, 'endpoint_fk': db_endpoint}) + #db_service_endpoint, service_endpoint_created = result + + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_service_id = db_service.dump_id() + notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) + return ServiceId(**dict_service_id) @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty: @@ -743,7 +812,6 @@ class ContextServiceServicerImpl(ContextServiceServicer): for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): yield ServiceEvent(**json.loads(message.content)) - """ # ----- Slice ---------------------------------------------------------------------------------------------------- @@ -881,6 +949,10 @@ class ContextServiceServicerImpl(ContextServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: + with self.session() as session: + result = session.query(DeviceModel).all() + return DeviceIdList(device_ids=[device.dump_id() for device in result]) + with self.lock: str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) @@ -960,4 +1032,3 @@ class ContextServiceServicerImpl(ContextServiceServicer): def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): yield ConnectionEvent(**json.loads(message.content)) - """ \ No newline at end of file diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index a2aebdd96..a0c4f8232 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -128,11 +128,11 @@ LINK_R1_R3 = json_link(LINK_R1_R3_UUID, LINK_R1_R3_EPIDS) # ----- Service -------------------------------------------------------------------------------------------------------- -SERVICE_R1_R2_UUID = 'SVC:R1/EP100-R2/EP100' +SERVICE_R1_R2_UUID = 'f0432e7b-bb83-4880-9c5d-008c4925ce7d' SERVICE_R1_R2_ID = json_service_id(SERVICE_R1_R2_UUID, context_id=CONTEXT_ID) SERVICE_R1_R2_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, 'EP100', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R2_ID, 'EP100', topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R1_ID, EP100, topology_id=TOPOLOGY_ID), + json_endpoint_id(DEVICE_R2_ID, EP100, topology_id=TOPOLOGY_ID), ] SERVICE_R1_R2_CONST = [ json_constraint('latency_ms', '15.2'), @@ -148,7 +148,7 @@ SERVICE_R1_R2 = json_service_l3nm_planned( config_rules=SERVICE_R1_R2_RULES) -SERVICE_R1_R3_UUID = 'SVC:R1/EP100-R3/EP100' +SERVICE_R1_R3_UUID = 'fab21cef-542a-4948-bb4a-a0468abfa925' SERVICE_R1_R3_ID = json_service_id(SERVICE_R1_R3_UUID, context_id=CONTEXT_ID) SERVICE_R1_R3_EPIDS = [ json_endpoint_id(DEVICE_R1_ID, 'EP100', topology_id=TOPOLOGY_ID), @@ -168,7 +168,7 @@ SERVICE_R1_R3 = json_service_l3nm_planned( config_rules=SERVICE_R1_R3_RULES) -SERVICE_R2_R3_UUID = 'SVC:R2/EP100-R3/EP100' +SERVICE_R2_R3_UUID = '1f2a808f-62bb-4eaa-94fb-448ed643e61a' SERVICE_R2_R3_ID = json_service_id(SERVICE_R2_R3_UUID, context_id=CONTEXT_ID) SERVICE_R2_R3_EPIDS = [ json_endpoint_id(DEVICE_R2_ID, 'EP100', topology_id=TOPOLOGY_ID), diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index f238e95d9..40234adcb 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -42,8 +42,6 @@ from context.service.rest_server.Resources import RESOURCES from requests import Session from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker -from context.service.database.ContextModel import ContextModel -from context.service.database.TopologyModel import TopologyModel from context.service.database.Base import Base from .Objects import ( @@ -106,7 +104,6 @@ def context_service_grpc(context_s_mb : Tuple[Database, MessageBroker]): # pylin _service.start() yield _service _service.stop() -""" @pytest.fixture(scope='session') def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name database = context_db_mb[0] @@ -118,7 +115,6 @@ def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pyli yield _rest_server _rest_server.shutdown() _rest_server.join() -""" @pytest.fixture(scope='session') def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name _client = ContextClient() @@ -135,7 +131,7 @@ def do_rest_request(url : str): return reply.json() """ -# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- +"""# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- def test_grpc_context( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name context_s_mb : Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name @@ -163,7 +159,7 @@ def test_grpc_context( assert len(response.contexts) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.get_all(ContextModel) + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover @@ -213,11 +209,11 @@ def test_grpc_context( assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.get_all(ContextModel) + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 1 @@ -251,14 +247,15 @@ def test_grpc_context( events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.get_all(ContextModel) + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 + def test_grpc_topology( context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name @@ -294,12 +291,12 @@ def test_grpc_topology( assert len(response.topologies) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.get_all(TopologyModel) + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 + assert len(db_entries) == 1 # ----- Create the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) @@ -336,12 +333,12 @@ def test_grpc_topology( # assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.get_all(TopologyModel) + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 1 + assert len(db_entries) == 2 # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) @@ -383,13 +380,14 @@ def test_grpc_topology( # events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.get_all(TopologyModel) + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 + def test_grpc_device( context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name @@ -439,8 +437,8 @@ def test_grpc_device( # ----- Dump state of database before create the object ------------------------------------------------------------ db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 2 @@ -476,8 +474,8 @@ def test_grpc_device( # ----- Dump state of database after create/update the object ------------------------------------------------------ db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 36 @@ -529,12 +527,12 @@ def test_grpc_device( # ----- Dump state of database after creating the object relation -------------------------------------------------- db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 33 + assert len(db_entries) == 36 - # ----- Remove the object ------------------------------------------------------------------------------------------ + # ----- Remove the object -------------------------------ro----------------------------------------------------------- context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) @@ -561,19 +559,21 @@ def test_grpc_device( # ----- Dump state of database after remove the object ------------------------------------------------------------- db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - # for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + for db_entry in db_entries: + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 - """ + def test_grpc_link( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] + context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name + context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_s_mb[0] + + database = Database(session) # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() + database.clear() # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector(context_client_grpc) @@ -592,25 +592,24 @@ def test_grpc_link( response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) assert response.device_uuid.uuid == DEVICE_R2_UUID + # events = events_collector.get_events(block=True, count=4) - events = events_collector.get_events(block=True, count=4) - - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + # assert isinstance(events[0], ContextEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[3], DeviceEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID # ----- Get when the object does not exist ------------------------------------------------------------------------- with pytest.raises(grpc.RpcError) as e: @@ -626,40 +625,39 @@ def test_grpc_link( assert len(response.links) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 67 + assert len(db_entries) == 44 # ----- Create the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) assert response.link_uuid.uuid == LINK_R1_R2_UUID # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, LinkEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, LinkEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID # ----- Update the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) assert response.link_uuid.uuid == LINK_R1_R2_UUID - # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, LinkEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, LinkEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 75 + assert len(db_entries) == 48 # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) @@ -674,6 +672,7 @@ def test_grpc_link( response = context_client_grpc.ListLinks(Empty()) assert len(response.links) == 1 assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + assert len(response.links[0].link_endpoint_ids) == 2 # ----- Create object relation ------------------------------------------------------------------------------------- @@ -684,28 +683,28 @@ def test_grpc_link( assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, TopologyEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check relation was created --------------------------------------------------------------------------------- response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID assert len(response.device_ids) == 2 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - assert response.device_ids[1].device_uuid.uuid == DEVICE_R2_UUID + # assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + # assert response.device_ids[1].device_uuid.uuid == DEVICE_R2_UUID assert len(response.link_ids) == 1 assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 75 + assert len(db_entries) == 48 # ----- Remove the object ------------------------------------------------------------------------------------------ context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) @@ -715,48 +714,47 @@ def test_grpc_link( context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=5) - - assert isinstance(events[0], LinkEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID - - assert isinstance(events[1], DeviceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[3], TopologyEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[4], ContextEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # events = events_collector.get_events(block=True, count=5) + # + # assert isinstance(events[0], LinkEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + # + # assert isinstance(events[1], DeviceEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID + # + # assert isinstance(events[3], TopologyEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[4], ContextEvent) + # assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 - def test_grpc_service( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] - + context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_s_mb[0] # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() + database = Database(Session) + database.clear() # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector(context_client_grpc) @@ -775,55 +773,58 @@ def test_grpc_service( response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) assert response.device_uuid.uuid == DEVICE_R2_UUID - - events = events_collector.get_events(block=True, count=4) - - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + # events = events_collector.get_events(block=True, count=4) + # + # assert isinstance(events[0], ContextEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[3], DeviceEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + LOGGER.info('----------------') # ----- Get when the object does not exist ------------------------------------------------------------------------- with pytest.raises(grpc.RpcError) as e: context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Service({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, SERVICE_R1_R2_UUID) + assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID) + LOGGER.info('----------------') # ----- List when the object does not exist ------------------------------------------------------------------------ response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) assert len(response.service_ids) == 0 + LOGGER.info('----------------') response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) assert len(response.services) == 0 + LOGGER.info('----------------') # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() + db_entries = database.dump_all() LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 67 + assert len(db_entries) == 44 # ----- Create the object ------------------------------------------------------------------------------------------ with pytest.raises(grpc.RpcError) as e: WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2) WRONG_SERVICE['service_endpoint_ids'][0]\ - ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' + ['topology_id']['context_id']['context_uuid']['uuid'] = 'ca1ea172-728f-441d-972c-feeae8c9bffc' context_client_grpc.SetService(Service(**WRONG_SERVICE)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(wrong-context-uuid) is invalid; '\ + msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(ca1ea172-728f-441d-972c-feeae8c9bffc) is invalid; '\ 'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID) assert e.value.details() == msg @@ -935,15 +936,18 @@ def test_grpc_service( LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 +""" def test_grpc_connection( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] + Session = context_s_mb[0] + + database = Database(Session) # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() + database.clear() # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector(context_client_grpc) @@ -1188,6 +1192,7 @@ def test_grpc_connection( LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 +""" # ----- Test REST API methods ------------------------------------------------------------------------------------------ -- GitLab From fe2b6c2f511f4aa6db2722af0efc0afb77ea9463 Mon Sep 17 00:00:00 2001 From: mansoca <carlos.manso@cttc.es> Date: Wed, 14 Dec 2022 12:00:33 +0100 Subject: [PATCH 010/158] Update scalability --- .../service/database/ConnectionModel.py | 31 ++++++++++++++----- .../service/database/ConstraintModel.py | 1 + .../grpc_server/ContextServiceServicerImpl.py | 3 +- src/context/tests/test_unitary.py | 5 +-- 4 files changed, 29 insertions(+), 11 deletions(-) diff --git a/src/context/service/database/ConnectionModel.py b/src/context/service/database/ConnectionModel.py index 4cbed43a4..1147f3859 100644 --- a/src/context/service/database/ConnectionModel.py +++ b/src/context/service/database/ConnectionModel.py @@ -19,7 +19,6 @@ from common.orm.backend.Tools import key_to_str from common.orm.fields.ForeignKeyField import ForeignKeyField from common.orm.fields.IntegerField import IntegerField from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField from common.orm.model.Model import Model from common.orm.HighLevel import get_object, get_or_create_object, get_related_objects, update_or_create_object from common.proto.context_pb2 import EndPointId @@ -27,10 +26,24 @@ from .EndPointModel import EndPointModel from .ServiceModel import ServiceModel from .Tools import remove_dict_key + +from sqlalchemy import Column, Enum, ForeignKey, Integer, CheckConstraint +from typing import Dict, List +from common.orm.HighLevel import get_related_objects +from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum +from .ConfigModel import ConfigModel +from .ConstraintModel import ConstraintsModel +from .ContextModel import ContextModel +from .Tools import grpc_to_enum +from sqlalchemy.dialects.postgresql import UUID +from context.service.database.Base import Base +import enum +LOGGER = logging.getLogger(__name__) + LOGGER = logging.getLogger(__name__) class PathModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() + path_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) def delete(self) -> None: for db_path_hop_pk,_ in self.references(PathHopModel): @@ -44,10 +57,10 @@ class PathModel(Model): # pylint: disable=abstract-method return [remove_dict_key(path_hop, 'position') for path_hop in path_hops] class PathHopModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - path_fk = ForeignKeyField(PathModel) - position = IntegerField(min_value=0, required=True) - endpoint_fk = ForeignKeyField(EndPointModel) + path_hop_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + path_uuid = Column(UUID(as_uuid=False), ForeignKey("Path.path_uuid")) + position = Column(Integer, CheckConstraint('position >= 0'), nullable=False) + endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ db_endpoint : EndPointModel = EndPointModel(self.database, self.endpoint_fk) @@ -57,8 +70,10 @@ class PathHopModel(Model): # pylint: disable=abstract-method class ConnectionModel(Model): pk = PrimaryKeyField() - connection_uuid = StringField(required=True, allow_empty=False) - service_fk = ForeignKeyField(ServiceModel, required=False) + # connection_uuid = StringField(required=True, allow_empty=False) + connection_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + # service_fk = ForeignKeyField(ServiceModel, required=False) + service_uuid = Column(UUID(as_uuid=False), ForeignKey("Service.service_uuid")) path_fk = ForeignKeyField(PathModel, required=True) def delete(self) -> None: diff --git a/src/context/service/database/ConstraintModel.py b/src/context/service/database/ConstraintModel.py index c5ed7504d..61c25289e 100644 --- a/src/context/service/database/ConstraintModel.py +++ b/src/context/service/database/ConstraintModel.py @@ -144,6 +144,7 @@ class ConstraintModel(Base): # pylint: disable=abstract-method __tablename__ = 'Constraint' # pk = PrimaryKeyField() # constraints_fk = ForeignKeyField(ConstraintsModel) + constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) constraints_uuid = Column(UUID(as_uuid=False), ForeignKey("Constraints.constraints_uuid"), primary_key=True) # kind = EnumeratedField(ConstraintKindEnum) kind = Column(Enum(ConstraintKindEnum, create_constraint=False, native_enum=False)) diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 98c961007..62c281205 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -683,6 +683,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): # create specific constraint constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) + str_constraint_id = str(uuid.uuid4()) LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') @@ -697,7 +698,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): # create generic constraint # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) constraint_data = { - 'constraint_uuid': db_constraints.constraint_uuid, 'position': position, 'kind': constraint_kind + 'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind } db_new_constraint = ConstraintModel(**constraint_data) diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index 40234adcb..6d70790ee 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -747,6 +747,7 @@ def test_grpc_link( LOGGER.info(db_entry) LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 +""" def test_grpc_service( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name @@ -936,9 +937,10 @@ def test_grpc_service( LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 -""" +""" + def test_grpc_connection( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name @@ -1192,7 +1194,6 @@ def test_grpc_connection( LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 -""" # ----- Test REST API methods ------------------------------------------------------------------------------------------ -- GitLab From d4b92b6b93552449655151839a70400d9f0f7337 Mon Sep 17 00:00:00 2001 From: mansoca <carlos.manso@cttc.es> Date: Wed, 14 Dec 2022 12:06:11 +0100 Subject: [PATCH 011/158] Cockroachdb files --- cluster-init.yaml | 20 ++++ cockroachdb-statefulset.yaml | 182 +++++++++++++++++++++++++++++++++++ 2 files changed, 202 insertions(+) create mode 100644 cluster-init.yaml create mode 100644 cockroachdb-statefulset.yaml diff --git a/cluster-init.yaml b/cluster-init.yaml new file mode 100644 index 000000000..6590ba127 --- /dev/null +++ b/cluster-init.yaml @@ -0,0 +1,20 @@ +# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/cluster-init.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: cluster-init + labels: + app: cockroachdb +spec: + template: + spec: + containers: + - name: cluster-init + image: cockroachdb/cockroach:v22.1.6 + imagePullPolicy: IfNotPresent + command: + - "/cockroach/cockroach" + - "init" + - "--insecure" + - "--host=cockroachdb-0.cockroachdb" + restartPolicy: OnFailure diff --git a/cockroachdb-statefulset.yaml b/cockroachdb-statefulset.yaml new file mode 100644 index 000000000..f308e8fce --- /dev/null +++ b/cockroachdb-statefulset.yaml @@ -0,0 +1,182 @@ +# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/cockroachdb-statefulset.yaml +apiVersion: v1 +kind: Service +metadata: + # This service is meant to be used by clients of the database. It exposes a ClusterIP that will + # automatically load balance connections to the different database pods. + name: cockroachdb-public + labels: + app: cockroachdb +spec: + ports: + # The main port, served by gRPC, serves Postgres-flavor SQL, internode + # traffic and the cli. + - port: 26257 + targetPort: 26257 + name: grpc + # The secondary port serves the UI as well as health and debug endpoints. + - port: 8080 + targetPort: 8080 + name: http + selector: + app: cockroachdb +--- +apiVersion: v1 +kind: Service +metadata: + # This service only exists to create DNS entries for each pod in the stateful + # set such that they can resolve each other's IP addresses. It does not + # create a load-balanced ClusterIP and should not be used directly by clients + # in most circumstances. + name: cockroachdb + labels: + app: cockroachdb + annotations: + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running in the cluster. + prometheus.io/scrape: "true" + prometheus.io/path: "_status/vars" + prometheus.io/port: "8080" +spec: + ports: + - port: 26257 + targetPort: 26257 + name: grpc + - port: 8080 + targetPort: 8080 + name: http + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + clusterIP: None + selector: + app: cockroachdb +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: cockroachdb-budget + labels: + app: cockroachdb +spec: + selector: + matchLabels: + app: cockroachdb + maxUnavailable: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb +spec: + serviceName: "cockroachdb" + replicas: 3 + selector: + matchLabels: + app: cockroachdb + template: + metadata: + labels: + app: cockroachdb + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cockroachdb + topologyKey: kubernetes.io/hostname + containers: + - name: cockroachdb + image: cockroachdb/cockroach:v22.1.6 + imagePullPolicy: IfNotPresent + # TODO: Change these to appropriate values for the hardware that you're running. You can see + # the resources that can be allocated on each of your Kubernetes nodes by running: + # kubectl describe nodes + # Note that requests and limits should have identical values. + resources: + requests: + cpu: "250m" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + ports: + - containerPort: 26257 + name: grpc + - containerPort: 8080 + name: http +# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. +# livenessProbe: +# httpGet: +# path: "/health" +# port: http +# initialDelaySeconds: 30 +# periodSeconds: 5 + readinessProbe: + httpGet: + path: "/health?ready=1" + port: http + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data + env: + - name: COCKROACH_CHANNEL + value: kubernetes-insecure + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: "1" + - name: MEMORY_LIMIT_MIB + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: "1Mi" + command: + - "/bin/bash" + - "-ecx" + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + - exec + /cockroach/cockroach + start + --logtostderr + --insecure + --advertise-host $(hostname -f) + --http-addr 0.0.0.0 + --join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb + --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB + --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. + terminationGracePeriodSeconds: 60 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 10Gi -- GitLab From 16ad5d96ccab70d41bd1b1860221bc18be5943d2 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 15 Dec 2022 10:09:33 +0000 Subject: [PATCH 012/158] Context component: - reviewing integration with CockroachDB - reviewing context REST API - reviewing database schema - reviewing code --- manifests/cockroachdb/README.md | 53 + .../cockroachdb/client-secure-operator.yaml | 51 + manifests/cockroachdb/cluster.yaml | 70 + manifests/cockroachdb/crds.yaml | 1385 ++++++++++++++++ .../cockroachdb/from_carlos/cluster-init.yaml | 0 .../from_carlos/cockroachdb-statefulset.yaml | 0 manifests/cockroachdb/operator.yaml | 602 +++++++ manifests/contextservice.yaml | 4 +- src/context/Config.py | 2 - src/context/requirements.in | 7 +- .../service/{grpc_server => }/Constants.py | 0 .../{grpc_server => }/ContextService.py | 10 +- .../service/ContextServiceServicerImpl.py | 1195 ++++++++++++++ src/context/service/Database.py | 2 +- src/context/service/Engine.py | 40 + src/context/service/__main__.py | 67 +- .../__init__.py => _old_code/Config.py} | 2 + .../service/{ => _old_code}/Populate.py | 0 .../{rest_server => _old_code}/Resources.py | 0 .../{rest_server => _old_code}/RestServer.py | 0 .../{grpc_server => _old_code}/__init__.py | 0 src/context/service/_old_code/__main__.py | 85 + src/context/service/_old_code/test_unitary.py | 1450 +++++++++++++++++ src/context/service/database/Base.py | 2 - src/context/service/database/ConfigModel.py | 2 +- .../service/database/ConnectionModel.py | 2 +- .../service/database/ConstraintModel.py | 2 +- src/context/service/database/ContextModel.py | 27 +- src/context/service/database/DeviceModel.py | 2 +- src/context/service/database/EndPointModel.py | 2 +- src/context/service/database/LinkModel.py | 2 +- .../service/database/RelationModels.py | 2 +- src/context/service/database/ServiceModel.py | 2 +- src/context/service/database/TopologyModel.py | 2 +- src/context/service/database/_Base.py | 22 + src/context/service/database/__init__.py | 1 + .../grpc_server/ContextServiceServicerImpl.py | 1213 -------------- src/context/tests/test_unitary.py | 132 +- 38 files changed, 5008 insertions(+), 1432 deletions(-) create mode 100644 manifests/cockroachdb/README.md create mode 100644 manifests/cockroachdb/client-secure-operator.yaml create mode 100644 manifests/cockroachdb/cluster.yaml create mode 100644 manifests/cockroachdb/crds.yaml rename cluster-init.yaml => manifests/cockroachdb/from_carlos/cluster-init.yaml (100%) rename cockroachdb-statefulset.yaml => manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml (100%) create mode 100644 manifests/cockroachdb/operator.yaml rename src/context/service/{grpc_server => }/Constants.py (100%) rename src/context/service/{grpc_server => }/ContextService.py (86%) create mode 100644 src/context/service/ContextServiceServicerImpl.py create mode 100644 src/context/service/Engine.py rename src/context/service/{rest_server/__init__.py => _old_code/Config.py} (86%) rename src/context/service/{ => _old_code}/Populate.py (100%) rename src/context/service/{rest_server => _old_code}/Resources.py (100%) rename src/context/service/{rest_server => _old_code}/RestServer.py (100%) rename src/context/service/{grpc_server => _old_code}/__init__.py (100%) create mode 100644 src/context/service/_old_code/__main__.py create mode 100644 src/context/service/_old_code/test_unitary.py delete mode 100644 src/context/service/database/Base.py create mode 100644 src/context/service/database/_Base.py delete mode 100644 src/context/service/grpc_server/ContextServiceServicerImpl.py diff --git a/manifests/cockroachdb/README.md b/manifests/cockroachdb/README.md new file mode 100644 index 000000000..6807afbb0 --- /dev/null +++ b/manifests/cockroachdb/README.md @@ -0,0 +1,53 @@ +# Ref: https://www.cockroachlabs.com/docs/stable/configure-cockroachdb-kubernetes.html + +DEPLOY_PATH="manifests/cockroachdb" +OPERATOR_BASE_URL="https://raw.githubusercontent.com/cockroachdb/cockroach-operator/master" + +mkdir -p ${DEPLOY_PATH} + +# Apply Custom Resource Definition for the CockroachDB Operator +curl -o "${DEPLOY_PATH}/crds.yaml" "${OPERATOR_BASE_URL}/install/crds.yaml" +kubectl apply -f "${DEPLOY_PATH}/crds.yaml" + +# Deploy CockroachDB Operator +curl -o "${DEPLOY_PATH}/operator.yaml" "${OPERATOR_BASE_URL}/install/operator.yaml" +# edit "${DEPLOY_PATH}/operator.yaml" +# - add env var: WATCH_NAMESPACE='tfs-ccdb' +kubectl apply -f "${DEPLOY_PATH}/operator.yaml" + +# Deploy CockroachDB +curl -o "${DEPLOY_PATH}/cluster.yaml" "${OPERATOR_BASE_URL}/examples/example.yaml" +# edit "${DEPLOY_PATH}/cluster.yaml" +# - set version +# - set number of replicas +kubectl create namespace tfs-ccdb +kubectl apply --namespace tfs-ccdb -f "${DEPLOY_PATH}/cluster.yaml" + +# Deploy CockroachDB Client +curl -o "${DEPLOY_PATH}/client-secure-operator.yaml" "${OPERATOR_BASE_URL}/examples/client-secure-operator.yaml" +kubectl create --namespace tfs-ccdb -f "${DEPLOY_PATH}/client-secure-operator.yaml" + +# Add tfs user with admin rights +$ kubectl exec -it ccdb-client-secure --namespace tfs-ccdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public +-- CREATE USER tfs WITH PASSWORD 'tfs123'; +-- GRANT admin TO tfs; + +# Expose CockroachDB SQL port (26257) +PORT=$(kubectl --namespace cockroachdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +PATCH='{"data": {"'${PORT}'": "cockroachdb/cockroachdb-public:'${PORT}'"}}' +kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + +PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}' +CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' +PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' +kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + +# Expose CockroachDB Console port (8080) +PORT=$(kubectl --namespace cockroachdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') +PATCH='{"data": {"'${PORT}'": "cockroachdb/cockroachdb-public:'${PORT}'"}}' +kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + +PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}' +CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' +PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' +kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" diff --git a/manifests/cockroachdb/client-secure-operator.yaml b/manifests/cockroachdb/client-secure-operator.yaml new file mode 100644 index 000000000..618d30ce6 --- /dev/null +++ b/manifests/cockroachdb/client-secure-operator.yaml @@ -0,0 +1,51 @@ +# Copyright 2022 The Cockroach Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated, do not edit. Please edit this file instead: config/templates/client-secure-operator.yaml.in +# + +apiVersion: v1 +kind: Pod +metadata: + name: cockroachdb-client-secure +spec: + serviceAccountName: cockroachdb-sa + containers: + - name: cockroachdb-client-secure + image: cockroachdb/cockroach:v22.1.8 + imagePullPolicy: IfNotPresent + volumeMounts: + - name: client-certs + mountPath: /cockroach/cockroach-certs/ + command: + - sleep + - "2147483648" # 2^31 + terminationGracePeriodSeconds: 0 + volumes: + - name: client-certs + projected: + sources: + - secret: + name: cockroachdb-node + items: + - key: ca.crt + path: ca.crt + - secret: + name: cockroachdb-root + items: + - key: tls.crt + path: client.root.crt + - key: tls.key + path: client.root.key + defaultMode: 256 diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml new file mode 100644 index 000000000..d36685109 --- /dev/null +++ b/manifests/cockroachdb/cluster.yaml @@ -0,0 +1,70 @@ +# Copyright 2022 The Cockroach Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated, do not edit. Please edit this file instead: config/templates/example.yaml.in +# + +apiVersion: crdb.cockroachlabs.com/v1alpha1 +kind: CrdbCluster +metadata: + # this translates to the name of the statefulset that is created + name: cockroachdb +spec: + dataStore: + pvc: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "60Gi" + volumeMode: Filesystem + resources: + requests: + # This is intentionally low to make it work on local k3d clusters. + cpu: 100m + memory: 1Gi + limits: + cpu: 1 + memory: 4Gi + tlsEnabled: true +# You can set either a version of the db or a specific image name +# cockroachDBVersion: v22.1.12 + image: + name: cockroachdb/cockroach:v22.1.12 + # nodes refers to the number of crdb pods that are created + # via the statefulset + nodes: 3 + additionalLabels: + crdb: is-cool + # affinity is a new API field that is behind a feature gate that is + # disabled by default. To enable please see the operator.yaml file. + + # The affinity field will accept any podSpec affinity rule. + # affinity: + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - cockroachdb + # topologyKey: kubernetes.io/hostname + + # nodeSelectors used to match against + # nodeSelector: + # worker-pool-name: crdb-workers diff --git a/manifests/cockroachdb/crds.yaml b/manifests/cockroachdb/crds.yaml new file mode 100644 index 000000000..1b5cd89ae --- /dev/null +++ b/manifests/cockroachdb/crds.yaml @@ -0,0 +1,1385 @@ +# Copyright 2022 The Cockroach Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + creationTimestamp: null + name: crdbclusters.crdb.cockroachlabs.com +spec: + group: crdb.cockroachlabs.com + names: + categories: + - all + - cockroachdb + kind: CrdbCluster + listKind: CrdbClusterList + plural: crdbclusters + shortNames: + - crdb + singular: crdbcluster + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: CrdbCluster is the CRD for the cockroachDB clusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CrdbClusterSpec defines the desired state of a CockroachDB + Cluster that the operator maintains. + properties: + additionalAnnotations: + additionalProperties: + type: string + description: (Optional) Additional custom resource annotations that + are added to all resources. Changing `AdditionalAnnotations` field + will result in cockroachDB cluster restart. + type: object + additionalArgs: + description: '(Optional) Additional command line arguments for the + `cockroach` binary Default: ""' + items: + type: string + type: array + additionalLabels: + additionalProperties: + type: string + description: (Optional) Additional custom resource labels that are + added to all resources + type: object + affinity: + description: (Optional) If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key <topologyKey> + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key <topologyKey> + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: '(Optional) AutomountServiceAccountToken determines whether + or not the stateful set pods should automount the service account + token. This is the default behavior in Kubernetes. For backward + compatibility reasons, this value defaults to `false` here. Default: + false' + type: boolean + cache: + description: '(Optional) The total size for caches (`--cache` command + line parameter) Default: "25%"' + type: string + clientTLSSecret: + description: '(Optional) The secret with a certificate and a private + key for root database user Default: ""' + type: string + cockroachDBVersion: + description: '(Optional) CockroachDBVersion sets the explicit version + of the cockroachDB image Default: ""' + type: string + dataStore: + description: Database disk storage configuration + properties: + hostPath: + description: (Optional) Directory from the host node's filesystem + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + pvc: + description: (Optional) Persistent volume to use + properties: + source: + description: (Optional) Existing PVC in the same namespace + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + spec: + description: (Optional) PVC to request a new persistent volume + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) * An existing + custom resource that implements data population (Alpha) + In order to use custom resource types that implement + data population, the AnyVolumeDataSource feature gate + must be enabled. If the provisioner or an external controller + can support the specified data source, it will create + a new volume based on the contents of the specified + data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is + required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + type: object + supportsAutoResize: + description: '(Optional) SupportsAutoResize marks that a PVC will + resize without restarting the entire cluster Default: false' + type: boolean + type: object + grpcPort: + description: '(Optional) The database port (`--port` CLI parameter + when starting the service) Default: 26258' + format: int32 + type: integer + httpPort: + description: '(Optional) The web UI port (`--http-port` CLI parameter + when starting the service) Default: 8080' + format: int32 + type: integer + image: + description: (Optional) Container image information + properties: + name: + description: 'Container image with supported CockroachDB version. + This defaults to the version pinned to the operator and requires + a full container and tag/sha name. For instance: cockroachdb/cockroachdb:v20.1' + type: string + pullPolicy: + description: '(Optional) PullPolicy for the image, which defaults + to IfNotPresent. Default: IfNotPresent' + type: string + pullSecret: + description: (Optional) Secret name containing the dockerconfig + to use for a registry that requires authentication. The secret + must be configured first by the user. + type: string + required: + - name + type: object + ingress: + description: (Optional) Ingress defines the Ingress configuration + used to expose the services using Ingress + properties: + sql: + description: (Optional) Ingress options for SQL connections Adding/changing + the SQL host will result in rolling update of the crdb cluster + nodes + properties: + annotations: + additionalProperties: + type: string + description: (Optional) Annotations related to ingress resource + type: object + host: + description: host is host to be used for exposing service + type: string + ingressClassName: + description: (Optional) IngressClassName to be used by ingress + resource + type: string + tls: + description: (Optional) TLS describes the TLS certificate + info + items: + description: IngressTLS describes the transport layer security + associated with an Ingress. + properties: + hosts: + description: Hosts are a list of hosts included in the + TLS certificate. The values in this list must match + the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller + fulfilling this Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: SecretName is the name of the secret used + to terminate TLS traffic on port 443. Field is left + optional to allow TLS routing based on SNI hostname + alone. If the SNI host in a listener conflicts with + the "Host" header field used by an IngressRule, the + SNI host is used for termination and value of the + Host header is used for routing. + type: string + type: object + type: array + required: + - host + type: object + ui: + description: (Optional) Ingress options for UI (HTTP) connections + properties: + annotations: + additionalProperties: + type: string + description: (Optional) Annotations related to ingress resource + type: object + host: + description: host is host to be used for exposing service + type: string + ingressClassName: + description: (Optional) IngressClassName to be used by ingress + resource + type: string + tls: + description: (Optional) TLS describes the TLS certificate + info + items: + description: IngressTLS describes the transport layer security + associated with an Ingress. + properties: + hosts: + description: Hosts are a list of hosts included in the + TLS certificate. The values in this list must match + the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller + fulfilling this Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: SecretName is the name of the secret used + to terminate TLS traffic on port 443. Field is left + optional to allow TLS routing based on SNI hostname + alone. If the SNI host in a listener conflicts with + the "Host" header field used by an IngressRule, the + SNI host is used for termination and value of the + Host header is used for routing. + type: string + type: object + type: array + required: + - host + type: object + type: object + logConfigMap: + description: '(Optional) LogConfigMap define the config map which + contains log configuration used to send the logs through the proper + channels in the cockroachdb. Logging configuration is available + for cockroach version v21.1.0 onwards. The logging configuration + is taken in format of yaml file, you can check the logging configuration + here (https://www.cockroachlabs.com/docs/stable/configure-logs.html#default-logging-configuration) + The default logging for cockroach version v20.x or less is stderr, + logging API is ignored for older versions. NOTE: The `data` field + of map must contain an entry called `logging.yaml` that contains + config options.' + type: string + maxSQLMemory: + description: '(Optional) The maximum in-memory storage capacity available + to store temporary data for SQL queries (`--max-sql-memory` parameter) + Default: "25%"' + type: string + maxUnavailable: + description: (Optional) The maximum number of pods that can be unavailable + during a rolling update. This number is set in the PodDistruptionBudget + and defaults to 1. + format: int32 + type: integer + minAvailable: + description: (Optional) The min number of pods that can be unavailable + during a rolling update. This number is set in the PodDistruptionBudget + and defaults to 1. + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: (Optional) If specified, the pod's nodeSelector + type: object + nodeTLSSecret: + description: '(Optional) The secret with certificates and a private + key for the TLS endpoint on the database port. The standard naming + of files is expected (tls.key, tls.crt, ca.crt) Default: ""' + type: string + nodes: + description: Number of nodes (pods) in the cluster + format: int32 + minimum: 3 + type: integer + podEnvVariables: + description: '(Optional) PodEnvVariables is a slice of environment + variables that are added to the pods Default: (empty list)' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels[''<KEY>'']`, `metadata.annotations[''<KEY>'']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + resources: + description: '(Optional) Database container resource limits. Any container + limits can be specified. Default: (not specified)' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + sqlPort: + description: '(Optional) The SQL Port number Default: 26257' + format: int32 + type: integer + tlsEnabled: + description: (Optional) TLSEnabled determines if TLS is enabled for + your CockroachDB Cluster + type: boolean + tolerations: + description: (Optional) Tolerations for scheduling pods onto some + dedicated nodes + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: (Optional) If specified, the pod's topology spread constraints + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + For example, in a 3-zone cluster, MaxSkew is set to 1, and + pods with the same labelSelector spread as 1/1/0: | zone1 + | zone2 | zone3 | | P | P | | - if MaxSkew is + 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + scheduling it onto zone1(zone2) would make the ActualSkew(2-0) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each <key, value> + as a "bucket", and try to put balanced number of pods into + each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node assigment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + required: + - dataStore + - nodes + type: object + status: + description: CrdbClusterStatus defines the observed state of Cluster + properties: + clusterStatus: + description: OperatorStatus represent the status of the operator(Failed, + Starting, Running or Other) + type: string + conditions: + description: List of conditions representing the current status of + the cluster resource. + items: + description: ClusterCondition represents cluster status as it is + perceived by the operator + properties: + lastTransitionTime: + description: The time when the condition was updated + format: date-time + type: string + status: + description: 'Condition status: True, False or Unknown' + type: string + type: + description: Type/Name of the condition + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + crdbcontainerimage: + description: CrdbContainerImage is the container that will be installed + type: string + operatorActions: + items: + description: ClusterAction represents cluster status as it is perceived + by the operator + properties: + lastTransitionTime: + description: The time when the condition was updated + format: date-time + type: string + message: + description: (Optional) Message related to the status of the + action + type: string + status: + description: 'Action status: Failed, Finished or Unknown' + type: string + type: + description: Type/Name of the action + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + sqlHost: + description: SQLHost is the host to be used with SQL ingress + type: string + version: + description: Database service version. Not populated and is just a + placeholder currently. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/cluster-init.yaml b/manifests/cockroachdb/from_carlos/cluster-init.yaml similarity index 100% rename from cluster-init.yaml rename to manifests/cockroachdb/from_carlos/cluster-init.yaml diff --git a/cockroachdb-statefulset.yaml b/manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml similarity index 100% rename from cockroachdb-statefulset.yaml rename to manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml new file mode 100644 index 000000000..2db3c37f8 --- /dev/null +++ b/manifests/cockroachdb/operator.yaml @@ -0,0 +1,602 @@ +# Copyright 2022 The Cockroach Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: cockroach-operator + name: cockroach-operator-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: cockroach-operator + name: cockroach-operator-sa + namespace: cockroach-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: cockroach-operator-role +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - get + - patch + - update +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - patch + - update +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - statefulsets/scale + verbs: + - get + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - get + - patch + - update +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs/status + verbs: + - get +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - list + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - delete + - deletecollection + - get + - list +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create +- apiGroups: + - "" + resources: + - pods/log + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - get + - patch + - update +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters/finalizers + verbs: + - update +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - get +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets/status + verbs: + - get +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - create + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - create + - get + - list + - watch +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cockroach-operator-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cockroach-operator-role +subjects: +- kind: ServiceAccount + name: cockroach-operator-sa + namespace: cockroach-operator-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: cockroach-operator + name: cockroach-operator-webhook-service + namespace: cockroach-operator-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app: cockroach-operator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: cockroach-operator + name: cockroach-operator-manager + namespace: cockroach-operator-system +spec: + replicas: 1 + selector: + matchLabels: + app: cockroach-operator + template: + metadata: + labels: + app: cockroach-operator + spec: + containers: + - args: + - -zap-log-level + - info + env: + - name: RELATED_IMAGE_COCKROACH_v20_1_4 + value: cockroachdb/cockroach:v20.1.4 + - name: RELATED_IMAGE_COCKROACH_v20_1_5 + value: cockroachdb/cockroach:v20.1.5 + - name: RELATED_IMAGE_COCKROACH_v20_1_8 + value: cockroachdb/cockroach:v20.1.8 + - name: RELATED_IMAGE_COCKROACH_v20_1_11 + value: cockroachdb/cockroach:v20.1.11 + - name: RELATED_IMAGE_COCKROACH_v20_1_12 + value: cockroachdb/cockroach:v20.1.12 + - name: RELATED_IMAGE_COCKROACH_v20_1_13 + value: cockroachdb/cockroach:v20.1.13 + - name: RELATED_IMAGE_COCKROACH_v20_1_15 + value: cockroachdb/cockroach:v20.1.15 + - name: RELATED_IMAGE_COCKROACH_v20_1_16 + value: cockroachdb/cockroach:v20.1.16 + - name: RELATED_IMAGE_COCKROACH_v20_1_17 + value: cockroachdb/cockroach:v20.1.17 + - name: RELATED_IMAGE_COCKROACH_v20_2_0 + value: cockroachdb/cockroach:v20.2.0 + - name: RELATED_IMAGE_COCKROACH_v20_2_1 + value: cockroachdb/cockroach:v20.2.1 + - name: RELATED_IMAGE_COCKROACH_v20_2_2 + value: cockroachdb/cockroach:v20.2.2 + - name: RELATED_IMAGE_COCKROACH_v20_2_3 + value: cockroachdb/cockroach:v20.2.3 + - name: RELATED_IMAGE_COCKROACH_v20_2_4 + value: cockroachdb/cockroach:v20.2.4 + - name: RELATED_IMAGE_COCKROACH_v20_2_5 + value: cockroachdb/cockroach:v20.2.5 + - name: RELATED_IMAGE_COCKROACH_v20_2_6 + value: cockroachdb/cockroach:v20.2.6 + - name: RELATED_IMAGE_COCKROACH_v20_2_8 + value: cockroachdb/cockroach:v20.2.8 + - name: RELATED_IMAGE_COCKROACH_v20_2_9 + value: cockroachdb/cockroach:v20.2.9 + - name: RELATED_IMAGE_COCKROACH_v20_2_10 + value: cockroachdb/cockroach:v20.2.10 + - name: RELATED_IMAGE_COCKROACH_v20_2_11 + value: cockroachdb/cockroach:v20.2.11 + - name: RELATED_IMAGE_COCKROACH_v20_2_12 + value: cockroachdb/cockroach:v20.2.12 + - name: RELATED_IMAGE_COCKROACH_v20_2_13 + value: cockroachdb/cockroach:v20.2.13 + - name: RELATED_IMAGE_COCKROACH_v20_2_14 + value: cockroachdb/cockroach:v20.2.14 + - name: RELATED_IMAGE_COCKROACH_v20_2_15 + value: cockroachdb/cockroach:v20.2.15 + - name: RELATED_IMAGE_COCKROACH_v20_2_16 + value: cockroachdb/cockroach:v20.2.16 + - name: RELATED_IMAGE_COCKROACH_v20_2_17 + value: cockroachdb/cockroach:v20.2.17 + - name: RELATED_IMAGE_COCKROACH_v20_2_18 + value: cockroachdb/cockroach:v20.2.18 + - name: RELATED_IMAGE_COCKROACH_v20_2_19 + value: cockroachdb/cockroach:v20.2.19 + - name: RELATED_IMAGE_COCKROACH_v21_1_0 + value: cockroachdb/cockroach:v21.1.0 + - name: RELATED_IMAGE_COCKROACH_v21_1_1 + value: cockroachdb/cockroach:v21.1.1 + - name: RELATED_IMAGE_COCKROACH_v21_1_2 + value: cockroachdb/cockroach:v21.1.2 + - name: RELATED_IMAGE_COCKROACH_v21_1_3 + value: cockroachdb/cockroach:v21.1.3 + - name: RELATED_IMAGE_COCKROACH_v21_1_4 + value: cockroachdb/cockroach:v21.1.4 + - name: RELATED_IMAGE_COCKROACH_v21_1_5 + value: cockroachdb/cockroach:v21.1.5 + - name: RELATED_IMAGE_COCKROACH_v21_1_6 + value: cockroachdb/cockroach:v21.1.6 + - name: RELATED_IMAGE_COCKROACH_v21_1_7 + value: cockroachdb/cockroach:v21.1.7 + - name: RELATED_IMAGE_COCKROACH_v21_1_9 + value: cockroachdb/cockroach:v21.1.9 + - name: RELATED_IMAGE_COCKROACH_v21_1_10 + value: cockroachdb/cockroach:v21.1.10 + - name: RELATED_IMAGE_COCKROACH_v21_1_11 + value: cockroachdb/cockroach:v21.1.11 + - name: RELATED_IMAGE_COCKROACH_v21_1_12 + value: cockroachdb/cockroach:v21.1.12 + - name: RELATED_IMAGE_COCKROACH_v21_1_13 + value: cockroachdb/cockroach:v21.1.13 + - name: RELATED_IMAGE_COCKROACH_v21_1_14 + value: cockroachdb/cockroach:v21.1.14 + - name: RELATED_IMAGE_COCKROACH_v21_1_15 + value: cockroachdb/cockroach:v21.1.15 + - name: RELATED_IMAGE_COCKROACH_v21_1_16 + value: cockroachdb/cockroach:v21.1.16 + - name: RELATED_IMAGE_COCKROACH_v21_1_17 + value: cockroachdb/cockroach:v21.1.17 + - name: RELATED_IMAGE_COCKROACH_v21_1_18 + value: cockroachdb/cockroach:v21.1.18 + - name: RELATED_IMAGE_COCKROACH_v21_1_19 + value: cockroachdb/cockroach:v21.1.19 + - name: RELATED_IMAGE_COCKROACH_v21_2_0 + value: cockroachdb/cockroach:v21.2.0 + - name: RELATED_IMAGE_COCKROACH_v21_2_1 + value: cockroachdb/cockroach:v21.2.1 + - name: RELATED_IMAGE_COCKROACH_v21_2_2 + value: cockroachdb/cockroach:v21.2.2 + - name: RELATED_IMAGE_COCKROACH_v21_2_3 + value: cockroachdb/cockroach:v21.2.3 + - name: RELATED_IMAGE_COCKROACH_v21_2_4 + value: cockroachdb/cockroach:v21.2.4 + - name: RELATED_IMAGE_COCKROACH_v21_2_5 + value: cockroachdb/cockroach:v21.2.5 + - name: RELATED_IMAGE_COCKROACH_v21_2_7 + value: cockroachdb/cockroach:v21.2.7 + - name: RELATED_IMAGE_COCKROACH_v21_2_8 + value: cockroachdb/cockroach:v21.2.8 + - name: RELATED_IMAGE_COCKROACH_v21_2_9 + value: cockroachdb/cockroach:v21.2.9 + - name: RELATED_IMAGE_COCKROACH_v21_2_10 + value: cockroachdb/cockroach:v21.2.10 + - name: RELATED_IMAGE_COCKROACH_v21_2_11 + value: cockroachdb/cockroach:v21.2.11 + - name: RELATED_IMAGE_COCKROACH_v21_2_12 + value: cockroachdb/cockroach:v21.2.12 + - name: RELATED_IMAGE_COCKROACH_v21_2_13 + value: cockroachdb/cockroach:v21.2.13 + - name: RELATED_IMAGE_COCKROACH_v21_2_14 + value: cockroachdb/cockroach:v21.2.14 + - name: RELATED_IMAGE_COCKROACH_v21_2_15 + value: cockroachdb/cockroach:v21.2.15 + - name: RELATED_IMAGE_COCKROACH_v21_2_16 + value: cockroachdb/cockroach:v21.2.16 + - name: RELATED_IMAGE_COCKROACH_v22_1_0 + value: cockroachdb/cockroach:v22.1.0 + - name: RELATED_IMAGE_COCKROACH_v22_1_1 + value: cockroachdb/cockroach:v22.1.1 + - name: RELATED_IMAGE_COCKROACH_v22_1_2 + value: cockroachdb/cockroach:v22.1.2 + - name: RELATED_IMAGE_COCKROACH_v22_1_3 + value: cockroachdb/cockroach:v22.1.3 + - name: RELATED_IMAGE_COCKROACH_v22_1_4 + value: cockroachdb/cockroach:v22.1.4 + - name: RELATED_IMAGE_COCKROACH_v22_1_5 + value: cockroachdb/cockroach:v22.1.5 + - name: RELATED_IMAGE_COCKROACH_v22_1_7 + value: cockroachdb/cockroach:v22.1.7 + - name: RELATED_IMAGE_COCKROACH_v22_1_8 + value: cockroachdb/cockroach:v22.1.8 + - name: OPERATOR_NAME + value: cockroachdb + - name: WATCH_NAMESPACE + value: tfs-ccdb + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: cockroachdb/cockroach-operator:v2.8.0 + imagePullPolicy: IfNotPresent + name: cockroach-operator + resources: + requests: + cpu: 10m + memory: 32Mi + serviceAccountName: cockroach-operator-sa +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: cockroach-operator-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cockroach-operator-webhook-service + namespace: cockroach-operator-system + path: /mutate-crdb-cockroachlabs-com-v1alpha1-crdbcluster + failurePolicy: Fail + name: mcrdbcluster.kb.io + rules: + - apiGroups: + - crdb.cockroachlabs.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - crdbclusters + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: cockroach-operator-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cockroach-operator-webhook-service + namespace: cockroach-operator-system + path: /validate-crdb-cockroachlabs-com-v1alpha1-crdbcluster + failurePolicy: Fail + name: vcrdbcluster.kb.io + rules: + - apiGroups: + - crdb.cockroachlabs.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - crdbclusters + sideEffects: None diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 5c07971a3..8201aed3e 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -46,6 +46,8 @@ spec: - containerPort: 1010 - containerPort: 8080 env: + - name: CCDB_URL + value: "cockroachdb://tfs:tfs123@cockroachdb-public.cockroachdb.svc.cluster.local:26257/tfs?sslmode=require" - name: DB_BACKEND value: "redis" - name: MB_BACKEND @@ -54,8 +56,6 @@ spec: value: "0" - name: LOG_LEVEL value: "INFO" - - name: POPULATE_FAKE_DATA - value: "false" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:1010"] diff --git a/src/context/Config.py b/src/context/Config.py index 6f5d1dc0b..70a332512 100644 --- a/src/context/Config.py +++ b/src/context/Config.py @@ -12,5 +12,3 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Autopopulate the component with fake data for testing purposes? -POPULATE_FAKE_DATA = False diff --git a/src/context/requirements.in b/src/context/requirements.in index 6e07456fc..6c68d692d 100644 --- a/src/context/requirements.in +++ b/src/context/requirements.in @@ -1,7 +1,8 @@ Flask==2.1.3 Flask-RESTful==0.3.9 +psycopg2-binary==2.9.3 redis==4.1.2 requests==2.27.1 -sqlalchemy==1.4.40 -sqlalchemy-cockroachdb -psycopg2-binary +SQLAlchemy==1.4.40 +sqlalchemy-cockroachdb==1.4.3 +SQLAlchemy-Utils==0.38.3 diff --git a/src/context/service/grpc_server/Constants.py b/src/context/service/Constants.py similarity index 100% rename from src/context/service/grpc_server/Constants.py rename to src/context/service/Constants.py diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/ContextService.py similarity index 86% rename from src/context/service/grpc_server/ContextService.py rename to src/context/service/ContextService.py index efede01de..c4881ccf5 100644 --- a/src/context/service/grpc_server/ContextService.py +++ b/src/context/service/ContextService.py @@ -12,15 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging, sqlalchemy from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server from common.proto.context_policy_pb2_grpc import add_ContextPolicyServiceServicer_to_server from common.tools.service.GenericGrpcService import GenericGrpcService -from sqlalchemy.orm import Session -import logging - from .ContextServiceServicerImpl import ContextServiceServicerImpl # Custom gRPC settings @@ -28,10 +26,12 @@ GRPC_MAX_WORKERS = 200 # multiple clients might keep connections alive for Get*E LOGGER = logging.getLogger(__name__) class ContextService(GenericGrpcService): - def __init__(self, session : Session, messagebroker : MessageBroker, cls_name: str = __name__) -> None: + def __init__( + self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker, cls_name: str = __name__ + ) -> None: port = get_service_port_grpc(ServiceNameEnum.CONTEXT) super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name) - self.context_servicer = ContextServiceServicerImpl(session, messagebroker) + self.context_servicer = ContextServiceServicerImpl(db_engine, messagebroker) def install_servicers(self): add_ContextServiceServicer_to_server(self.context_servicer, self.server) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py new file mode 100644 index 000000000..b5725f007 --- /dev/null +++ b/src/context/service/ContextServiceServicerImpl.py @@ -0,0 +1,1195 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import grpc, json, logging, operator, sqlalchemy, threading, uuid +from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker +from sqlalchemy.dialects.postgresql import UUID, insert +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, Iterator, List, Optional, Set, Tuple, Union +from common.message_broker.MessageBroker import MessageBroker +from common.orm.backend.Tools import key_to_str +from common.proto.context_pb2 import ( + Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, + Context, ContextEvent, ContextId, ContextIdList, ContextList, + Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList, + Empty, EventTypeEnum, + Link, LinkEvent, LinkId, LinkIdList, LinkList, + Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList, + Slice, SliceEvent, SliceId, SliceIdList, SliceList, + Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList, + ConfigActionEnum, Constraint) +from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule +from common.proto.context_pb2_grpc import ContextServiceServicer +from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from context.service.Database import Database +from context.service.database.ConfigModel import ( + ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel, grpc_config_rules_to_raw, update_config) +from context.service.database.ConnectionModel import ConnectionModel, set_path +from context.service.database.ConstraintModel import ( + ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS, set_constraints) +from context.service.database.ContextModel import ContextModel +from context.service.database.DeviceModel import ( + DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel) +from context.service.database.EndPointModel import EndPointModel, KpiSampleTypeModel, set_kpi_sample_types +from context.service.database.Events import notify_event +from context.service.database.KpiSampleType import grpc_to_enum__kpi_sample_type +from context.service.database.LinkModel import LinkModel +from context.service.database.PolicyRuleModel import PolicyRuleModel +from context.service.database.RelationModels import ( + ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, + SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel) +from context.service.database.ServiceModel import ( + ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) +from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status +from context.service.database.TopologyModel import TopologyModel +from .Constants import ( + CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, + TOPIC_TOPOLOGY) + +LOGGER = logging.getLogger(__name__) + +SERVICE_NAME = 'Context' +METHOD_NAMES = [ + 'ListConnectionIds', 'ListConnections', 'GetConnection', 'SetConnection', 'RemoveConnection', 'GetConnectionEvents', + 'ListContextIds', 'ListContexts', 'GetContext', 'SetContext', 'RemoveContext', 'GetContextEvents', + 'ListTopologyIds', 'ListTopologies', 'GetTopology', 'SetTopology', 'RemoveTopology', 'GetTopologyEvents', + 'ListDeviceIds', 'ListDevices', 'GetDevice', 'SetDevice', 'RemoveDevice', 'GetDeviceEvents', + 'ListLinkIds', 'ListLinks', 'GetLink', 'SetLink', 'RemoveLink', 'GetLinkEvents', + 'ListServiceIds', 'ListServices', 'GetService', 'SetService', 'RemoveService', 'GetServiceEvents', + 'ListSliceIds', 'ListSlices', 'GetSlice', 'SetSlice', 'RemoveSlice', 'GetSliceEvents', + 'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule', + 'UnsetService', 'UnsetSlice', +] +METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) + +class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer): + def __init__(self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker) -> None: + LOGGER.debug('Creating Servicer...') + self.db_engine = db_engine + #self.lock = threading.Lock() + #session = sessionmaker(bind=db_engine, expire_on_commit=False) + #self.session = session + #self.database = Database(session) + self.messagebroker = messagebroker + LOGGER.debug('Servicer Created') + + # ----- Context ---------------------------------------------------------------------------------------------------- + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList: + def callback(session : Session) -> List[Dict]: + obj_list : List[ContextModel] = session.query(ContextModel).all() + return [obj.dump_id() for obj in obj_list] + return ContextIdList(context_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: + def callback(session : Session) -> List[Dict]: + obj_list : List[ContextModel] = session.query(ContextModel).all() + return [obj.dump() for obj in obj_list] + return ContextList(contexts=run_transaction(sessionmaker(bind=self.db_engine), callback)) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: + context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_uuid.uuid)) + def callback(session : Session) -> Optional[Dict]: + obj : Optional[ContextModel] = \ + session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=self.db_engine), callback) + if obj is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + return Context(**obj) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: + context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_id.context_uuid.uuid)) + context_name = request.context_id.context_uuid.uuid + + for i, topology_id in enumerate(request.topology_ids): + topology_context_uuid = topology_id.context_id.context_uuid.uuid + if topology_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + + for i, service_id in enumerate(request.service_ids): + service_context_uuid = service_id.context_id.context_uuid.uuid + if service_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + + def callback(session : Session) -> Tuple[Optional[Dict], bool]: + obj : Optional[ContextModel] = \ + session.query(ContextModel).with_for_update().filter_by(context_uuid=context_uuid).one_or_none() + updated = obj is not None + obj = ContextModel(context_uuid=context_uuid, context_name=context_name) + session.merge(obj) + session.commit() + obj = session.get(ContextModel, {'context_uuid': context_uuid}) + return (None if obj is None else obj.dump_id()), updated + + obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) + if obj_id is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': obj_id}) + return ContextId(**obj_id) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: + context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_uuid.uuid)) + + def callback(session : Session) -> bool: + num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() + return num_deleted > 0 + + deleted = run_transaction(sessionmaker(bind=self.db_engine), callback) + #if deleted: + # notify_event(self.messagebroker, TOPIC_CONTEXT, EventTypeEnum.EVENTTYPE_REMOVE, {'context_id': request}) + return Empty() + +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: +# for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): +# yield ContextEvent(**json.loads(message.content)) + + + # ----- Topology --------------------------------------------------------------------------------------------------- + +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: +# context_uuid = request.context_uuid.uuid +# +# with self.session() as session: +# result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() +# if not result: +# raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) +# +# db_topologies = result.topology +# return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: +# context_uuid = request.context_uuid.uuid +# +# with self.session() as session: +# result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by( +# context_uuid=context_uuid).one_or_none() +# if not result: +# raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) +# +# db_topologies = result.topology +# return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: +# topology_uuid = request.topology_uuid.uuid +# +# result, dump = self.database.get_object(TopologyModel, topology_uuid, True) +# with self.session() as session: +# devs = None +# links = None +# +# filt = {'topology_uuid': topology_uuid} +# topology_devices = session.query(TopologyDeviceModel).filter_by(**filt).all() +# if topology_devices: +# devs = [] +# for td in topology_devices: +# filt = {'device_uuid': td.device_uuid} +# devs.append(session.query(DeviceModel).filter_by(**filt).one()) +# +# filt = {'topology_uuid': topology_uuid} +# topology_links = session.query(TopologyLinkModel).filter_by(**filt).all() +# if topology_links: +# links = [] +# for tl in topology_links: +# filt = {'link_uuid': tl.link_uuid} +# links.append(session.query(LinkModel).filter_by(**filt).one()) +# +# return Topology(**result.dump(devs, links)) +# +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId: +# context_uuid = request.topology_id.context_id.context_uuid.uuid +# topology_uuid = request.topology_id.topology_uuid.uuid +# with self.session() as session: +# topology_add = TopologyModel(topology_uuid=topology_uuid, context_uuid=context_uuid) +# updated = True +# db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() +# if not db_topology: +# updated = False +# session.merge(topology_add) +# session.commit() +# db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() +# +# for device_id in request.device_ids: +# device_uuid = device_id.device_uuid.uuid +# td = TopologyDeviceModel(topology_uuid=topology_uuid, device_uuid=device_uuid) +# result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(td) +# +# +# for link_id in request.link_ids: +# link_uuid = link_id.link_uuid.uuid +# db_link = session.query(LinkModel).filter( +# LinkModel.link_uuid == link_uuid).one_or_none() +# tl = TopologyLinkModel(topology_uuid=topology_uuid, link_uuid=link_uuid) +# result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(tl) +# +# +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_topology_id = db_topology.dump_id() +# notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) +# return TopologyId(**dict_topology_id) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: +# context_uuid = request.context_id.context_uuid.uuid +# topology_uuid = request.topology_uuid.uuid +# +# with self.session() as session: +# result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).one_or_none() +# if not result: +# return Empty() +# dict_topology_id = result.dump_id() +# +# session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).delete() +# session.commit() +# event_type = EventTypeEnum.EVENTTYPE_REMOVE +# notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) +# return Empty() +# +## @safe_and_metered_rpc_method(METRICS, LOGGER) +## def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: +## for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): +## yield TopologyEvent(**json.loads(message.content)) +# +# +# # ----- Device ----------------------------------------------------------------------------------------------------- +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList: +# with self.session() as session: +# result = session.query(DeviceModel).all() +# return DeviceIdList(device_ids=[device.dump_id() for device in result]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList: +# with self.session() as session: +# result = session.query(DeviceModel).all() +# return DeviceList(devices=[device.dump() for device in result]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device: +# device_uuid = request.device_uuid.uuid +# with self.session() as session: +# result = session.query(DeviceModel).filter(DeviceModel.device_uuid == device_uuid).one_or_none() +# if not result: +# raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) +# +# rd = result.dump(include_config_rules=True, include_drivers=True, include_endpoints=True) +# +# rt = Device(**rd) +# +# return rt +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId: +# with self.session() as session: +# device_uuid = request.device_id.device_uuid.uuid +# +# for i, endpoint in enumerate(request.device_endpoints): +# endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid +# if len(endpoint_device_uuid) == 0: +# endpoint_device_uuid = device_uuid +# if device_uuid != endpoint_device_uuid: +# raise InvalidArgumentException( +# 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, +# ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) +# +# config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) +# running_config_result = self.update_config(session, device_uuid, 'device', config_rules) +# db_running_config = running_config_result[0][0] +# config_uuid = db_running_config.config_uuid +# running_config_rules = update_config( +# self.database, device_uuid, 'device', request.device_config.config_rules) +# db_running_config = running_config_rules[0][0] +# +# new_obj = DeviceModel(**{ +# 'device_uuid' : device_uuid, +# 'device_type' : request.device_type, +# 'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status), +# 'device_config_uuid' : config_uuid, +# }) +# result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj) +# db_device, updated = result +# +# self.set_drivers(db_device, request.device_drivers) +# +# for i, endpoint in enumerate(request.device_endpoints): +# endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid +# # endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid +# # if len(endpoint_device_uuid) == 0: +# # endpoint_device_uuid = device_uuid +# +# endpoint_attributes = { +# 'device_uuid' : db_device.device_uuid, +# 'endpoint_uuid': endpoint_uuid, +# 'endpoint_type': endpoint.endpoint_type, +# } +# +# endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid +# endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# # str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) +# +# db_topology, topo_dump = self.database.get_object(TopologyModel, endpoint_topology_uuid) +# +# topology_device = TopologyDeviceModel( +# topology_uuid=endpoint_topology_uuid, +# device_uuid=db_device.device_uuid) +# self.database.create_or_update(topology_device) +# +# endpoint_attributes['topology_uuid'] = db_topology.topology_uuid +# result : Tuple[EndPointModel, bool] = update_or_create_object( +# self.database, EndPointModel, str_endpoint_key, endpoint_attributes) +# db_endpoint, endpoint_updated = result # pylint: disable=unused-variable +# +# new_endpoint = EndPointModel(**endpoint_attributes) +# result: Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) +# db_endpoint, updated = result +# +# self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types) +# +# # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_device_id = db_device.dump_id() +# # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) +# +# return DeviceId(**dict_device_id) +# +# def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types): +# db_endpoint_pk = db_endpoint.endpoint_uuid +# for kpi_sample_type in grpc_endpoint_kpi_sample_types: +# orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) +# # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name]) +# data = {'endpoint_uuid': db_endpoint_pk, +# 'kpi_sample_type': orm_kpi_sample_type.name, +# 'kpi_uuid': str(uuid.uuid4())} +# db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data) +# self.database.create(db_endpoint_kpi_sample_type) +# +# def set_drivers(self, db_device: DeviceModel, grpc_device_drivers): +# db_device_pk = db_device.device_uuid +# for driver in grpc_device_drivers: +# orm_driver = grpc_to_enum__device_driver(driver) +# str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) +# driver_config = { +# # "driver_uuid": str(uuid.uuid4()), +# "device_uuid": db_device_pk, +# "driver": orm_driver.name +# } +# db_device_driver = DriverModel(**driver_config) +# db_device_driver.device_fk = db_device +# db_device_driver.driver = orm_driver +# +# self.database.create_or_update(db_device_driver) +# +# def update_config( +# self, session, db_parent_pk: str, config_name: str, +# raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]] +# ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: +# +# created = False +# +# db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none() +# if not db_config: +# db_config = ConfigModel() +# setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk) +# session.add(db_config) +# session.commit() +# created = True +# +# LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump())) +# +# db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)] +# +# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): +# if action == ORM_ConfigActionEnum.SET: +# result : Tuple[ConfigRuleModel, bool] = self.set_config_rule( +# db_config, position, resource_key, resource_value) +# db_config_rule, updated = result +# db_objects.append((db_config_rule, updated)) +# elif action == ORM_ConfigActionEnum.DELETE: +# self.delete_config_rule(db_config, resource_key) +# else: +# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' +# raise AttributeError( +# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) +# +# return db_objects +# +# def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str, +# ): # -> Tuple[ConfigRuleModel, bool]: +# +# from src.context.service.database.Tools import fast_hasher +# str_rule_key_hash = fast_hasher(resource_key) +# str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') +# pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key)) +# data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position, +# 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value} +# to_add = ConfigRuleModel(**data) +# +# result, updated = self.database.create_or_update(to_add) +# return result, updated +# +# def delete_config_rule( +# self, db_config: ConfigModel, resource_key: str +# ) -> None: +# +# from src.context.service.database.Tools import fast_hasher +# str_rule_key_hash = fast_hasher(resource_key) +# str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') +# +# db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) +# +# if db_config_rule is None: +# return +# db_config_rule.delete() +# +# def delete_all_config_rules(self, db_config: ConfigModel) -> None: +# +# db_config_rule_pks = db_config.references(ConfigRuleModel) +# for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() +# +# """ +# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): +# if action == ORM_ConfigActionEnum.SET: +# result: Tuple[ConfigRuleModel, bool] = set_config_rule( +# database, db_config, position, resource_key, resource_value) +# db_config_rule, updated = result +# db_objects.append((db_config_rule, updated)) +# elif action == ORM_ConfigActionEnum.DELETE: +# delete_config_rule(database, db_config, resource_key) +# else: +# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' +# raise AttributeError( +# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) +# +# return db_objects +# """ +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty: +# device_uuid = request.device_uuid.uuid +# +# with self.session() as session: +# db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() +# +# session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() +# session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() +# session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() +# +# if not db_device: +# return Empty() +# dict_device_id = db_device.dump_id() +# +# session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() +# session.commit() +# event_type = EventTypeEnum.EVENTTYPE_REMOVE +# notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) +# return Empty() +# +## @safe_and_metered_rpc_method(METRICS, LOGGER) +## def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: +## for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): +## yield DeviceEvent(**json.loads(message.content)) +# +# +# +# +# # ----- Link ------------------------------------------------------------------------------------------------------- +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList: +# with self.session() as session: +# result = session.query(LinkModel).all() +# return LinkIdList(link_ids=[db_link.dump_id() for db_link in result]) +# +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList: +# with self.session() as session: +# link_list = LinkList() +# +# db_links = session.query(LinkModel).all() +# +# for db_link in db_links: +# link_uuid = db_link.link_uuid +# filt = {'link_uuid': link_uuid} +# link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() +# if link_endpoints: +# eps = [] +# for lep in link_endpoints: +# filt = {'endpoint_uuid': lep.endpoint_uuid} +# eps.append(session.query(EndPointModel).filter_by(**filt).one()) +# link_list.links.append(Link(**db_link.dump(eps))) +# +# return link_list +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link: +# link_uuid = request.link_uuid.uuid +# with self.session() as session: +# result = session.query(LinkModel).filter(LinkModel.link_uuid == link_uuid).one_or_none() +# if not result: +# raise NotFoundException(LinkModel.__name__.replace('Model', ''), link_uuid) +# +# filt = {'link_uuid': link_uuid} +# link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() +# if link_endpoints: +# eps = [] +# for lep in link_endpoints: +# filt = {'endpoint_uuid': lep.endpoint_uuid} +# eps.append(session.query(EndPointModel).filter_by(**filt).one()) +# return Link(**result.dump(eps)) +# +# rd = result.dump() +# rt = Link(**rd) +# +# return rt +# +# +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId: +# link_uuid = request.link_id.link_uuid.uuid +# +# new_link = LinkModel(**{ +# 'link_uuid': link_uuid +# }) +# result: Tuple[LinkModel, bool] = self.database.create_or_update(new_link) +# db_link, updated = result +# +# for endpoint_id in request.link_endpoint_ids: +# endpoint_uuid = endpoint_id.endpoint_uuid.uuid +# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid +# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# +# +# db_topology = None +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) +# # check device is in topology +# self.database.get_object(TopologyDeviceModel, endpoint_device_uuid) +# +# +# link_endpoint = LinkEndPointModel(link_uuid=link_uuid, endpoint_uuid=endpoint_uuid) +# result: Tuple[LinkEndPointModel, bool] = self.database.create_or_update(link_endpoint) +# +# if db_topology is not None: +# topology_link = TopologyLinkModel(topology_uuid=endpoint_topology_uuid, link_uuid=link_uuid) +# result: Tuple[TopologyLinkModel, bool] = self.database.create_or_update(topology_link) +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_link_id = db_link.dump_id() +# notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) +# return LinkId(**dict_link_id) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty: +# with self.session() as session: +# link_uuid = request.link_uuid.uuid +# +# session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() +# session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() +# +# result = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() +# if not result: +# return Empty() +# dict_link_id = result.dump_id() +# +# session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() +# session.commit() +# event_type = EventTypeEnum.EVENTTYPE_REMOVE +# notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) +# return Empty() +# +## @safe_and_metered_rpc_method(METRICS, LOGGER) +## def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: +## for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): +## yield LinkEvent(**json.loads(message.content)) +# +# +# # ----- Service ---------------------------------------------------------------------------------------------------- +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList: +# context_uuid = request.context_uuid.uuid +# +# with self.session() as session: +# db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() +# return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: +# context_uuid = request.context_uuid.uuid +# +# with self.session() as session: +# db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() +# return ServiceList(services=[db_service.dump() for db_service in db_services]) +# +# +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service: +# service_uuid = request.service_uuid.uuid +# with self.session() as session: +# result = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none() +# +# if not result: +# raise NotFoundException(ServiceModel.__name__.replace('Model', ''), service_uuid) +# +# return Service(**result.dump()) +# +# def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int +# ) -> Tuple[Union_ConstraintModel, bool]: +# with self.session() as session: +# +# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) +# +# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) +# if parser is None: +# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( +# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) +# +# # create specific constraint +# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) +# str_constraint_id = str(uuid.uuid4()) +# LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) +# # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) +# # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') +# +# # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( +# # database, constraint_class, str_constraint_key, constraint_data) +# constraint_data[constraint_class.main_pk_name()] = str_constraint_id +# db_new_constraint = constraint_class(**constraint_data) +# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) +# db_specific_constraint, updated = result +# +# # create generic constraint +# # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) +# constraint_data = { +# 'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind +# } +# +# db_new_constraint = ConstraintModel(**constraint_data) +# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) +# db_constraint, updated = result +# +# return db_constraint, updated +# +# def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints +# ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: +# with self.session() as session: +# # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') +# # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) +# result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() +# created = None +# if result: +# created = True +# session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() +# db_constraints = ConstraintsModel(constraints_uuid=service_uuid) +# session.add(db_constraints) +# +# db_objects = [(db_constraints, created)] +# +# for position,grpc_constraint in enumerate(grpc_constraints): +# result : Tuple[ConstraintModel, bool] = self.set_constraint( +# db_constraints, grpc_constraint, position) +# db_constraint, updated = result +# db_objects.append((db_constraint, updated)) +# +# return db_objects +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId: +# with self.lock: +# with self.session() as session: +# +# context_uuid = request.service_id.context_id.context_uuid.uuid +# # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) +# db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() +# +# for i,endpoint_id in enumerate(request.service_endpoint_ids): +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: +# raise InvalidArgumentException( +# 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), +# endpoint_topology_context_uuid, +# ['should be == {:s}({:s})'.format( +# 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) +# +# service_uuid = request.service_id.service_uuid.uuid +# # str_service_key = key_to_str([context_uuid, service_uuid]) +# +# constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints) +# db_constraints = constraints_result[0][0] +# +# config_rules = grpc_config_rules_to_raw(request.service_config.config_rules) +# running_config_result = update_config(self.database, str_service_key, 'running', config_rules) +# db_running_config = running_config_result[0][0] +# +# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { +# 'context_fk' : db_context, +# 'service_uuid' : service_uuid, +# 'service_type' : grpc_to_enum__service_type(request.service_type), +# 'service_constraints_fk': db_constraints, +# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), +# 'service_config_fk' : db_running_config, +# }) +# db_service, updated = result +# +# for i,endpoint_id in enumerate(request.service_endpoint_ids): +# endpoint_uuid = endpoint_id.endpoint_uuid.uuid +# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid +# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# +# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) +# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') +# +# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) +# +# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') +# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( +# self.database, ServiceEndPointModel, str_service_endpoint_key, { +# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) +# #db_service_endpoint, service_endpoint_created = result +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_service_id = db_service.dump_id() +# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) +# return ServiceId(**dict_service_id) +# context_uuid = request.service_id.context_id.context_uuid.uuid +# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) +# +# for i,endpoint_id in enumerate(request.service_endpoint_ids): +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: +# raise InvalidArgumentException( +# 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), +# endpoint_topology_context_uuid, +# ['should be == {:s}({:s})'.format( +# 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) +# +# service_uuid = request.service_id.service_uuid.uuid +# str_service_key = key_to_str([context_uuid, service_uuid]) +# +# constraints_result = set_constraints( +# self.database, str_service_key, 'service', request.service_constraints) +# db_constraints = constraints_result[0][0] +# +# running_config_rules = update_config( +# self.database, str_service_key, 'service', request.service_config.config_rules) +# db_running_config = running_config_rules[0][0] +# +# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { +# 'context_fk' : db_context, +# 'service_uuid' : service_uuid, +# 'service_type' : grpc_to_enum__service_type(request.service_type), +# 'service_constraints_fk': db_constraints, +# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), +# 'service_config_fk' : db_running_config, +# }) +# db_service, updated = result +# +# for i,endpoint_id in enumerate(request.service_endpoint_ids): +# endpoint_uuid = endpoint_id.endpoint_uuid.uuid +# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid +# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# +# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) +# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') +# +# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) +# +# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') +# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( +# self.database, ServiceEndPointModel, str_service_endpoint_key, { +# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) +# #db_service_endpoint, service_endpoint_created = result +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_service_id = db_service.dump_id() +# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) +# return ServiceId(**dict_service_id) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty: +# with self.lock: +# context_uuid = request.context_id.context_uuid.uuid +# service_uuid = request.service_uuid.uuid +# db_service = ServiceModel(self.database, key_to_str([context_uuid, service_uuid]), auto_load=False) +# found = db_service.load() +# if not found: return Empty() +# +# dict_service_id = db_service.dump_id() +# db_service.delete() +# +# event_type = EventTypeEnum.EVENTTYPE_REMOVE +# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) +# return Empty() +# +## @safe_and_metered_rpc_method(METRICS, LOGGER) +## def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: +## for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): +## yield ServiceEvent(**json.loads(message.content)) +# +# +# # ----- Slice ---------------------------------------------------------------------------------------------------- +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList: +# with self.lock: +# db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) +# db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) +# db_slices = sorted(db_slices, key=operator.attrgetter('pk')) +# return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList: +# with self.lock: +# db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) +# db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) +# db_slices = sorted(db_slices, key=operator.attrgetter('pk')) +# return SliceList(slices=[db_slice.dump() for db_slice in db_slices]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice: +# with self.lock: +# str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid]) +# db_slice : SliceModel = get_object(self.database, SliceModel, str_key) +# return Slice(**db_slice.dump( +# include_endpoint_ids=True, include_constraints=True, include_config_rules=True, +# include_service_ids=True, include_subslice_ids=True)) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: +# with self.lock: +# context_uuid = request.slice_id.context_id.context_uuid.uuid +# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) +# +# for i,endpoint_id in enumerate(request.slice_endpoint_ids): +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: +# raise InvalidArgumentException( +# 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), +# endpoint_topology_context_uuid, +# ['should be == {:s}({:s})'.format( +# 'request.slice_id.context_id.context_uuid.uuid', context_uuid)]) +# +# slice_uuid = request.slice_id.slice_uuid.uuid +# str_slice_key = key_to_str([context_uuid, slice_uuid]) +# +# constraints_result = set_constraints( +# self.database, str_slice_key, 'slice', request.slice_constraints) +# db_constraints = constraints_result[0][0] +# +# running_config_rules = update_config( +# self.database, str_slice_key, 'slice', request.slice_config.config_rules) +# db_running_config = running_config_rules[0][0] +# +# result : Tuple[SliceModel, bool] = update_or_create_object(self.database, SliceModel, str_slice_key, { +# 'context_fk' : db_context, +# 'slice_uuid' : slice_uuid, +# 'slice_constraints_fk': db_constraints, +# 'slice_status' : grpc_to_enum__slice_status(request.slice_status.slice_status), +# 'slice_config_fk' : db_running_config, +# 'slice_owner_uuid' : request.slice_owner.owner_uuid.uuid, +# 'slice_owner_string' : request.slice_owner.owner_string, +# }) +# db_slice, updated = result +# +# for i,endpoint_id in enumerate(request.slice_endpoint_ids): +# endpoint_uuid = endpoint_id.endpoint_uuid.uuid +# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid +# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# +# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) +# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') +# +# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) +# +# str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--') +# result : Tuple[SliceEndPointModel, bool] = get_or_create_object( +# self.database, SliceEndPointModel, str_slice_endpoint_key, { +# 'slice_fk': db_slice, 'endpoint_fk': db_endpoint}) +# #db_slice_endpoint, slice_endpoint_created = result +# +# for i,service_id in enumerate(request.slice_service_ids): +# service_uuid = service_id.service_uuid.uuid +# service_context_uuid = service_id.context_id.context_uuid.uuid +# str_service_key = key_to_str([service_context_uuid, service_uuid]) +# db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key) +# +# str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--') +# result : Tuple[SliceServiceModel, bool] = get_or_create_object( +# self.database, SliceServiceModel, str_slice_service_key, { +# 'slice_fk': db_slice, 'service_fk': db_service}) +# #db_slice_service, slice_service_created = result +# +# for i,subslice_id in enumerate(request.slice_subslice_ids): +# subslice_uuid = subslice_id.slice_uuid.uuid +# subslice_context_uuid = subslice_id.context_id.context_uuid.uuid +# str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid]) +# db_subslice : SliceModel = get_object(self.database, SliceModel, str_subslice_key) +# +# str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--') +# result : Tuple[SliceSubSliceModel, bool] = get_or_create_object( +# self.database, SliceSubSliceModel, str_slice_subslice_key, { +# 'slice_fk': db_slice, 'sub_slice_fk': db_subslice}) +# #db_slice_subslice, slice_subslice_created = result +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_slice_id = db_slice.dump_id() +# notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) +# return SliceId(**dict_slice_id) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def UnsetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: +# with self.lock: +# context_uuid = request.slice_id.context_id.context_uuid.uuid +# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) +# +# for i,endpoint_id in enumerate(request.slice_endpoint_ids): +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: +# raise InvalidArgumentException( +# 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), +# endpoint_topology_context_uuid, +# ['should be == {:s}({:s})'.format( +# 'request.slice_id.context_id.context_uuid.uuid', context_uuid)]) +# +# slice_uuid = request.slice_id.slice_uuid.uuid +# str_slice_key = key_to_str([context_uuid, slice_uuid]) +# +# if len(request.slice_constraints) > 0: +# raise NotImplementedError('UnsetSlice: removal of constraints') +# if len(request.slice_config.config_rules) > 0: +# raise NotImplementedError('UnsetSlice: removal of config rules') +# if len(request.slice_endpoint_ids) > 0: +# raise NotImplementedError('UnsetSlice: removal of endpoints') +# +# updated = False +# +# for service_id in request.slice_service_ids: +# service_uuid = service_id.service_uuid.uuid +# service_context_uuid = service_id.context_id.context_uuid.uuid +# str_service_key = key_to_str([service_context_uuid, service_uuid]) +# str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--') +# SliceServiceModel(self.database, str_slice_service_key).delete() +# updated = True +# +# for subslice_id in request.slice_subslice_ids: +# subslice_uuid = subslice_id.slice_uuid.uuid +# subslice_context_uuid = subslice_id.context_id.context_uuid.uuid +# str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid]) +# str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--') +# SliceSubSliceModel(self.database, str_slice_subslice_key).delete() +# updated = True +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# db_slice : SliceModel = get_object(self.database, SliceModel, str_slice_key) +# dict_slice_id = db_slice.dump_id() +# notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) +# return SliceId(**dict_slice_id) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty: +# with self.lock: +# context_uuid = request.context_id.context_uuid.uuid +# slice_uuid = request.slice_uuid.uuid +# db_slice = SliceModel(self.database, key_to_str([context_uuid, slice_uuid]), auto_load=False) +# found = db_slice.load() +# if not found: return Empty() +# +# dict_slice_id = db_slice.dump_id() +# db_slice.delete() +# +# event_type = EventTypeEnum.EVENTTYPE_REMOVE +# notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) +# return Empty() +# +## @safe_and_metered_rpc_method(METRICS, LOGGER) +## def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: +## for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT): +## yield SliceEvent(**json.loads(message.content)) +# +# +# # ----- Connection ------------------------------------------------------------------------------------------------- +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: +# with self.session() as session: +# result = session.query(DeviceModel).all() +# return DeviceIdList(device_ids=[device.dump_id() for device in result]) +# +# with self.lock: +# str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) +# db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) +# db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) +# db_connections = sorted(db_connections, key=operator.attrgetter('pk')) +# return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListConnections(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: +# with self.lock: +# str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) +# db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) +# db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) +# db_connections = sorted(db_connections, key=operator.attrgetter('pk')) +# return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection: +# with self.lock: +# db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid) +# return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True)) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId: +# with self.lock: +# connection_uuid = request.connection_id.connection_uuid.uuid +# +# connection_attributes = {'connection_uuid': connection_uuid} +# +# service_context_uuid = request.service_id.context_id.context_uuid.uuid +# service_uuid = request.service_id.service_uuid.uuid +# if len(service_context_uuid) > 0 and len(service_uuid) > 0: +# str_service_key = key_to_str([service_context_uuid, service_uuid]) +# db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key) +# connection_attributes['service_fk'] = db_service +# +# path_hops_result = set_path(self.database, connection_uuid, request.path_hops_endpoint_ids, path_name = '') +# db_path = path_hops_result[0] +# connection_attributes['path_fk'] = db_path +# +# result : Tuple[ConnectionModel, bool] = update_or_create_object( +# self.database, ConnectionModel, connection_uuid, connection_attributes) +# db_connection, updated = result +# +# for sub_service_id in request.sub_service_ids: +# sub_service_uuid = sub_service_id.service_uuid.uuid +# sub_service_context_uuid = sub_service_id.context_id.context_uuid.uuid +# str_sub_service_key = key_to_str([sub_service_context_uuid, sub_service_uuid]) +# db_service : ServiceModel = get_object(self.database, ServiceModel, str_sub_service_key) +# +# str_connection_sub_service_key = key_to_str([connection_uuid, str_sub_service_key], separator='--') +# result : Tuple[ConnectionSubServiceModel, bool] = get_or_create_object( +# self.database, ConnectionSubServiceModel, str_connection_sub_service_key, { +# 'connection_fk': db_connection, 'sub_service_fk': db_service}) +# #db_connection_sub_service, connection_sub_service_created = result +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_connection_id = db_connection.dump_id() +# notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) +# return ConnectionId(**dict_connection_id) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty: +# with self.lock: +# db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False) +# found = db_connection.load() +# if not found: return Empty() +# +# dict_connection_id = db_connection.dump_id() +# db_connection.delete() +# +# event_type = EventTypeEnum.EVENTTYPE_REMOVE +# notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) +# return Empty() +# +## @safe_and_metered_rpc_method(METRICS, LOGGER) +## def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: +## for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): +## yield ConnectionEvent(**json.loads(message.content)) +# +# +# # ----- Policy ----------------------------------------------------------------------------------------------------- +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: +# with self.lock: +# db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) +# db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) +# return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList: +# with self.lock: +# db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) +# db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) +# return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules]) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: +# with self.lock: +# policy_rule_uuid = request.uuid.uuid +# db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid) +# return PolicyRule(**db_policy_rule.dump()) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: +# with self.lock: +# policy_rule_type = request.WhichOneof('policy_rule') +# policy_rule_json = grpc_message_to_json(request) +# policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid'] +# result: Tuple[PolicyRuleModel, bool] = update_or_create_object( +# self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)}) +# db_policy, updated = result # pylint: disable=unused-variable +# +# #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_policy_id = db_policy.dump_id() +# #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) +# return PolicyRuleId(**dict_policy_id) +# +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty: +# with self.lock: +# policy_uuid = request.uuid.uuid +# db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False) +# found = db_policy.load() +# if not found: return Empty() +# +# dict_policy_id = db_policy.dump_id() +# db_policy.delete() +# #event_type = EventTypeEnum.EVENTTYPE_REMOVE +# #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) +# return Empty() +# \ No newline at end of file diff --git a/src/context/service/Database.py b/src/context/service/Database.py index 2b699203a..8aa568239 100644 --- a/src/context/service/Database.py +++ b/src/context/service/Database.py @@ -2,7 +2,7 @@ from typing import Tuple, List from sqlalchemy import MetaData from sqlalchemy.orm import Session, joinedload -from context.service.database.Base import Base +from context.service.database._Base import Base import logging from common.orm.backend.Tools import key_to_str diff --git a/src/context/service/Engine.py b/src/context/service/Engine.py new file mode 100644 index 000000000..7944d8601 --- /dev/null +++ b/src/context/service/Engine.py @@ -0,0 +1,40 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, sqlalchemy, sqlalchemy_utils +from common.Settings import get_setting + +LOGGER = logging.getLogger(__name__) + +APP_NAME = 'tfs' + +class Engine: + def get_engine(self) -> sqlalchemy.engine.Engine: + ccdb_url = get_setting('CCDB_URL') + + try: + engine = sqlalchemy.create_engine( + ccdb_url, connect_args={'application_name': APP_NAME}, echo=False, future=True) + except: # pylint: disable=bare-except + LOGGER.exception('Failed to connect to database: {:s}'.format(ccdb_url)) + return None + + try: + if not sqlalchemy_utils.database_exists(engine.url): + sqlalchemy_utils.create_database(engine.url) + except: # pylint: disable=bare-except + LOGGER.exception('Failed to check/create to database: {:s}'.format(ccdb_url)) + return None + + return engine diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 34942ec82..c5bbcc3f2 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -14,85 +14,52 @@ import logging, signal, sys, threading from prometheus_client import start_http_server -from common.Settings import get_log_level, get_metrics_port, get_setting +from common.Settings import get_log_level, get_metrics_port from common.message_broker.Factory import get_messagebroker_backend from common.message_broker.MessageBroker import MessageBroker -from context.Config import POPULATE_FAKE_DATA -from sqlalchemy.orm import sessionmaker, declarative_base -from context.service.database.Base import Base -from .grpc_server.ContextService import ContextService -from .rest_server.Resources import RESOURCES -from .rest_server.RestServer import RestServer -from .Populate import populate -# from models import Device, EndPoint, EndPointId, DeviceDriverEnum, DeviceOperationalStatusEnum, ConfigActionEnum, \ -# ConfigRule, KpiSampleType, Base -from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker +from .database import rebuild_database +from .ContextService import ContextService +from .Engine import Engine + +LOG_LEVEL = get_log_level() +logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") +LOGGER = logging.getLogger(__name__) + +LOGGER.addHandler(logging.StreamHandler(stream=sys.stderr)) +LOGGER.setLevel(logging.WARNING) terminate = threading.Event() -LOGGER = None +LOGGER : logging.Logger = None def signal_handler(signal, frame): # pylint: disable=redefined-outer-name LOGGER.warning('Terminate signal received') terminate.set() def main(): - global LOGGER # pylint: disable=global-statement - - log_level = get_log_level() - logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") - LOGGER = logging.getLogger(__name__) - + LOGGER.info('Starting...') signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) - LOGGER.info('Starting...') - # Start metrics server metrics_port = get_metrics_port() start_http_server(metrics_port) - # Get database instance - db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable' - LOGGER.debug('Connecting to DB: {}'.format(db_uri)) - - # engine = create_engine(db_uri, echo=False) - - try: - engine = create_engine(db_uri) - except Exception as e: - LOGGER.error("Failed to connect to database.") - LOGGER.error(f"{e}") - return 1 - - Base.metadata.create_all(engine) - session = sessionmaker(bind=engine, expire_on_commit=False) + db_engine = Engine().get_engine() + rebuild_database(db_engine, drop_if_exists=False) # Get message broker instance messagebroker = MessageBroker(get_messagebroker_backend()) # Starting context service - grpc_service = ContextService(session, messagebroker) + grpc_service = ContextService(db_engine, messagebroker) grpc_service.start() - rest_server = RestServer() - for endpoint_name, resource_class, resource_url in RESOURCES: - rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(session,)) - rest_server.start() - - populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA) - if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'}) - if populate_fake_data: - LOGGER.info('Populating fake data...') - populate(host='127.0.0.1', port=grpc_service.bind_port) - LOGGER.info('Fake Data populated') - # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=0.1): pass LOGGER.info('Terminating...') grpc_service.stop() - rest_server.shutdown() - rest_server.join() LOGGER.info('Bye') return 0 diff --git a/src/context/service/rest_server/__init__.py b/src/context/service/_old_code/Config.py similarity index 86% rename from src/context/service/rest_server/__init__.py rename to src/context/service/_old_code/Config.py index 70a332512..6f5d1dc0b 100644 --- a/src/context/service/rest_server/__init__.py +++ b/src/context/service/_old_code/Config.py @@ -12,3 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Autopopulate the component with fake data for testing purposes? +POPULATE_FAKE_DATA = False diff --git a/src/context/service/Populate.py b/src/context/service/_old_code/Populate.py similarity index 100% rename from src/context/service/Populate.py rename to src/context/service/_old_code/Populate.py diff --git a/src/context/service/rest_server/Resources.py b/src/context/service/_old_code/Resources.py similarity index 100% rename from src/context/service/rest_server/Resources.py rename to src/context/service/_old_code/Resources.py diff --git a/src/context/service/rest_server/RestServer.py b/src/context/service/_old_code/RestServer.py similarity index 100% rename from src/context/service/rest_server/RestServer.py rename to src/context/service/_old_code/RestServer.py diff --git a/src/context/service/grpc_server/__init__.py b/src/context/service/_old_code/__init__.py similarity index 100% rename from src/context/service/grpc_server/__init__.py rename to src/context/service/_old_code/__init__.py diff --git a/src/context/service/_old_code/__main__.py b/src/context/service/_old_code/__main__.py new file mode 100644 index 000000000..69d3f5cbe --- /dev/null +++ b/src/context/service/_old_code/__main__.py @@ -0,0 +1,85 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Settings import get_log_level, get_metrics_port, get_setting +from common.orm.Database import Database +from common.orm.Factory import get_database_backend +from common.message_broker.Factory import get_messagebroker_backend +from common.message_broker.MessageBroker import MessageBroker +from context.service.grpc_server.ContextService import ContextService +from .Config import POPULATE_FAKE_DATA +from .Populate import populate +from .Resources import RESOURCES +from .RestServer import RestServer + +terminate = threading.Event() +LOGGER = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") + LOGGER = logging.getLogger(__name__) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + # Get database instance + database = Database(get_database_backend()) + + # Get message broker instance + messagebroker = MessageBroker(get_messagebroker_backend()) + + # Starting context service + grpc_service = ContextService(database, messagebroker) + grpc_service.start() + + rest_server = RestServer() + for endpoint_name, resource_class, resource_url in RESOURCES: + rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) + rest_server.start() + + populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA) + if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'}) + if populate_fake_data: + LOGGER.info('Populating fake data...') + populate(host='127.0.0.1', port=grpc_service.bind_port) + LOGGER.info('Fake Data populated') + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=0.1): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + rest_server.shutdown() + rest_server.join() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/context/service/_old_code/test_unitary.py b/src/context/service/_old_code/test_unitary.py new file mode 100644 index 000000000..04e054aad --- /dev/null +++ b/src/context/service/_old_code/test_unitary.py @@ -0,0 +1,1450 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=too-many-lines +import copy, grpc, logging, os, pytest, requests, time, urllib +from typing import Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, + get_service_baseurl_http, get_service_port_grpc, get_service_port_http) +from context.service.Database import Database +from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum +from common.message_broker.MessageBroker import MessageBroker +from common.proto.context_pb2 import ( + Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, + DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, + ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) +from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) +from common.type_checkers.Assertions import ( + validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids, + validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids, + validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology, + validate_topology_ids) +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from context.service.database.Tools import ( + FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher) +from context.service.grpc_server.ContextService import ContextService +from context.service._old_code.Populate import populate +from context.service.rest_server.RestServer import RestServer +from context.service.rest_server.Resources import RESOURCES +from requests import Session +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker +from context.service.database._Base import Base + +from .Objects import ( + CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, + DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2, + LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, + SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID, + POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID) + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +LOCAL_HOST = '127.0.0.1' +GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports +HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT)) # avoid privileged ports + +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT) + +DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST +DEFAULT_REDIS_SERVICE_PORT = 6379 +DEFAULT_REDIS_DATABASE_ID = 0 + +REDIS_CONFIG = { + 'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST), + 'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT), + 'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID', DEFAULT_REDIS_DATABASE_ID ), +} + +SCENARIOS = [ + ('all_sqlalchemy', {}, MessageBrokerBackendEnum.INMEMORY, {} ), + ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) +# ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), +] + +@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) +def context_s_mb(request) -> Tuple[Session, MessageBroker]: + name,db_session,mb_backend,mb_settings = request.param + msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' + LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) + + db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable' + LOGGER.debug('Connecting to DB: {}'.format(db_uri)) + + try: + engine = create_engine(db_uri) + except Exception as e: + LOGGER.error("Failed to connect to database.") + LOGGER.error(f"{e}") + return 1 + + Base.metadata.create_all(engine) + _session = sessionmaker(bind=engine, expire_on_commit=False) + + _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings)) + yield _session, _message_broker + _message_broker.terminate() + +@pytest.fixture(scope='session') +def context_service_grpc(context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + _service = ContextService(context_s_mb[0], context_s_mb[1]) + _service.start() + yield _service + _service.stop() +@pytest.fixture(scope='session') +def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + database = context_db_mb[0] + _rest_server = RestServer() + for endpoint_name, resource_class, resource_url in RESOURCES: + _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) + _rest_server.start() + time.sleep(1) # bring time for the server to start + yield _rest_server + _rest_server.shutdown() + _rest_server.join() +@pytest.fixture(scope='session') +def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name + _client = ContextClient() + yield _client + _client.close() +""" +def do_rest_request(url : str): + base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) + request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) + LOGGER.warning('Request: GET {:s}'.format(str(request_url))) + reply = requests.get(request_url) + LOGGER.warning('Reply: {:s}'.format(str(reply.text))) + assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) + return reply.json() +""" + +"""# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- +def test_grpc_context( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_s_mb : Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_s_mb[0] + + database = Database(Session) + + # ----- Clean the database ----------------------------------------------------------------------------------------- + database.clear() + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Context({:s}) not found'.format(DEFAULT_CONTEXT_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 0 + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + wrong_uuid = 'c97c4185-e1d1-4ea7-b6b9-afbf76cb61f4' + with pytest.raises(grpc.RpcError) as e: + WRONG_TOPOLOGY_ID = copy.deepcopy(TOPOLOGY_ID) + WRONG_TOPOLOGY_ID['context_id']['context_uuid']['uuid'] = wrong_uuid + WRONG_CONTEXT = copy.deepcopy(CONTEXT) + WRONG_CONTEXT['topology_ids'].append(WRONG_TOPOLOGY_ID) + context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + with pytest.raises(grpc.RpcError) as e: + WRONG_SERVICE_ID = copy.deepcopy(SERVICE_R1_R2_ID) + WRONG_SERVICE_ID['context_id']['context_uuid']['uuid'] = wrong_uuid + WRONG_CONTEXT = copy.deepcopy(CONTEXT) + WRONG_CONTEXT['service_ids'].append(WRONG_SERVICE_ID) + context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + # ----- Check create event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ContextEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ContextEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = database.dump_all() + + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 1 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert len(response.topology_ids) == 0 + assert len(response.service_ids) == 0 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 1 + assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 1 + assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert len(response.contexts[0].topology_ids) == 0 + assert len(response.contexts[0].service_ids) == 0 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, ContextEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = database.dump_all() + + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + +def test_grpc_topology( + context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name + context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_s_mb[0] + + database = Database(session) + + # ----- Clean the database ----------------------------------------------------------------------------------------- + database.clear() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, ContextEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + # assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) + assert e.value.details() == 'Topology({:s}) not found'.format(DEFAULT_TOPOLOGY_UUID) + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 0 + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 1 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT) + CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=2) + + # assert isinstance(events[0], TopologyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # assert isinstance(events[1], ContextEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 2 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.device_ids) == 0 + assert len(response.link_ids) == 0 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 1 + assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.topologies[0].device_ids) == 0 + assert len(response.topologies[0].link_ids) == 0 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=2) + + # assert isinstance(events[0], TopologyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # assert isinstance(events[1], ContextEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + +def test_grpc_device( + context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name + context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_s_mb[0] + + database = Database(session) + + # ----- Clean the database ----------------------------------------------------------------------------------------- + database.clear() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + events = events_collector.get_events(block=True, count=2) + + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Device({:s}) not found'.format(DEVICE_R1_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListDeviceIds(Empty()) + assert len(response.device_ids) == 0 + + response = context_client_grpc.ListDevices(Empty()) + assert len(response.devices) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 2 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_DEVICE = copy.deepcopy(DEVICE_R1) + WRONG_DEVICE_UUID = '3f03c76d-31fb-47f5-9c1d-bc6b6bfa2d08' + WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID + context_client_grpc.SetDevice(Device(**WRONG_DEVICE)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ + 'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID) + assert e.value.details() == msg + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, DeviceEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, DeviceEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 47 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.device_type == 'packet-router' + assert len(response.device_config.config_rules) == 3 + assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + assert len(response.device_drivers) == 1 + assert len(response.device_endpoints) == 3 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListDeviceIds(Empty()) + assert len(response.device_ids) == 1 + assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.ListDevices(Empty()) + assert len(response.devices) == 1 + assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.devices[0].device_type == 'packet-router' + assert len(response.devices[0].device_config.config_rules) == 3 + assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + assert len(response.devices[0].device_drivers) == 1 + assert len(response.devices[0].device_endpoints) == 3 + + # ----- Create object relation ------------------------------------------------------------------------------------- + TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) + TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID) + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check relation was created --------------------------------------------------------------------------------- + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.device_ids) == 1 + assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + assert len(response.link_ids) == 0 + + # ----- Dump state of database after creating the object relation -------------------------------------------------- + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 47 + + # ----- Remove the object -------------------------------ro----------------------------------------------------------- + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=3) + + # assert isinstance(events[0], DeviceEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # assert isinstance(events[2], ContextEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + +def test_grpc_link( + context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name + context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_s_mb[0] + + database = Database(session) + + # ----- Clean the database ----------------------------------------------------------------------------------------- + database.clear() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) + assert response.device_uuid.uuid == DEVICE_R2_UUID + # events = events_collector.get_events(block=True, count=4) + + # assert isinstance(events[0], ContextEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[3], DeviceEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Link({:s}) not found'.format(LINK_R1_R2_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListLinkIds(Empty()) + assert len(response.link_ids) == 0 + + response = context_client_grpc.ListLinks(Empty()) + assert len(response.links) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 80 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) + assert response.link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, LinkEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) + assert response.link_uuid.uuid == LINK_R1_R2_UUID + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, LinkEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 88 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) + assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID + assert len(response.link_endpoint_ids) == 2 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListLinkIds(Empty()) + assert len(response.link_ids) == 1 + assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + + response = context_client_grpc.ListLinks(Empty()) + assert len(response.links) == 1 + assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + + assert len(response.links[0].link_endpoint_ids) == 2 + + # ----- Create object relation ------------------------------------------------------------------------------------- + TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY) + TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID) + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_LINK)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check relation was created --------------------------------------------------------------------------------- + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.device_ids) == 2 + # assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + # assert response.device_ids[1].device_uuid.uuid == DEVICE_R2_UUID + assert len(response.link_ids) == 1 + assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 88 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=5) + # + # assert isinstance(events[0], LinkEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + # + # assert isinstance(events[1], DeviceEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID + # + # assert isinstance(events[3], TopologyEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[4], ContextEvent) + # assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 +""" + +def test_grpc_service( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_s_mb[0] + # ----- Clean the database ----------------------------------------------------------------------------------------- + database = Database(Session) + database.clear() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) + assert response.device_uuid.uuid == DEVICE_R2_UUID + # events = events_collector.get_events(block=True, count=4) + # + # assert isinstance(events[0], ContextEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[3], DeviceEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + LOGGER.info('----------------') + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID) + LOGGER.info('----------------') + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + assert len(response.service_ids) == 0 + LOGGER.info('----------------') + + response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 + LOGGER.info('----------------') + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 80 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2) + WRONG_SERVICE['service_endpoint_ids'][0]\ + ['topology_id']['context_id']['context_uuid']['uuid'] = 'ca1ea172-728f-441d-972c-feeae8c9bffc' + context_client_grpc.SetService(Service(**WRONG_SERVICE)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(ca1ea172-728f-441d-972c-feeae8c9bffc) is invalid; '\ + 'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + events = events_collector.get_events(block=True, count=2) + + assert isinstance(events[0], ServiceEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[1], ContextEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ServiceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 108 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) + assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM + assert len(response.service_endpoint_ids) == 2 + assert len(response.service_constraints) == 2 + assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert len(response.service_config.config_rules) == 3 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + assert len(response.service_ids) == 1 + assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID + + response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 1 + assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM + assert len(response.services[0].service_endpoint_ids) == 2 + assert len(response.services[0].service_constraints) == 2 + assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert len(response.services[0].service_config.config_rules) == 3 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + events = events_collector.get_events(block=True, count=5) + + assert isinstance(events[0], ServiceEvent) + assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[1], DeviceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID + + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID + + assert isinstance(events[3], TopologyEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + assert isinstance(events[4], ContextEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + +""" + +def test_grpc_connection( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_s_mb[0] + + database = Database(Session) + + # ----- Clean the database ----------------------------------------------------------------------------------------- + database.clear() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) + assert response.device_uuid.uuid == DEVICE_R2_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R3)) + assert response.device_uuid.uuid == DEVICE_R3_UUID + + response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetService(Service(**SERVICE_R2_R3)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R2_R3_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R2_R3_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetService(Service(**SERVICE_R1_R3)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R3_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R3_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + events = events_collector.get_events(block=True, count=11) + + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + + assert isinstance(events[3], DeviceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + + assert isinstance(events[4], DeviceEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[4].device_id.device_uuid.uuid == DEVICE_R3_UUID + + assert isinstance(events[5], ServiceEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[5].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[5].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[6], ContextEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[6].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + assert isinstance(events[7], ServiceEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[7].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[7].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID + + assert isinstance(events[8], ContextEvent) + assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + assert isinstance(events[9], ServiceEvent) + assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[9].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[9].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID + + assert isinstance(events[10], ContextEvent) + assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[10].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Connection({:s}) not found'.format(CONNECTION_R1_R3_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 0 + + response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 187 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3) + WRONG_CONNECTION['path_hops_endpoint_ids'][0]\ + ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' + context_client_grpc.SetConnection(Connection(**WRONG_CONNECTION)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + # TODO: should we check that all endpoints belong to same topology? + # TODO: should we check that endpoints form links over the topology? + msg = 'EndPoint({:s}/{:s}:wrong-context-uuid/{:s}) not found'.format( + DEVICE_R1_UUID, WRONG_CONNECTION['path_hops_endpoint_ids'][0]['endpoint_uuid']['uuid'], DEFAULT_TOPOLOGY_UUID) + assert e.value.details() == msg + + response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) + assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ConnectionEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) + assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ConnectionEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 203 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + assert response.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_id.service_uuid.uuid == SERVICE_R1_R3_UUID + assert len(response.path_hops_endpoint_ids) == 6 + assert len(response.sub_service_ids) == 2 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 1 + assert response.connection_ids[0].connection_uuid.uuid == CONNECTION_R1_R3_UUID + + response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 1 + assert response.connections[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + assert len(response.connections[0].path_hops_endpoint_ids) == 6 + assert len(response.connections[0].sub_service_ids) == 2 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R3_ID)) + context_client_grpc.RemoveService(ServiceId(**SERVICE_R2_R3_ID)) + context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R3_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + events = events_collector.get_events(block=True, count=9) + + assert isinstance(events[0], ConnectionEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + assert isinstance(events[1], ServiceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[1].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID + + assert isinstance(events[2], ServiceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[2].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID + + assert isinstance(events[3], ServiceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[3].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[4], DeviceEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[4].device_id.device_uuid.uuid == DEVICE_R1_UUID + + assert isinstance(events[5], DeviceEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[5].device_id.device_uuid.uuid == DEVICE_R2_UUID + + assert isinstance(events[6], DeviceEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[6].device_id.device_uuid.uuid == DEVICE_R3_UUID + + assert isinstance(events[7], TopologyEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[7].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[7].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + assert isinstance(events[8], ContextEvent) + assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + +def test_grpc_policy( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + context_database = context_db_mb[0] + + # ----- Clean the database ----------------------------------------------------------------------------------------- + context_database.clear_all() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector(context_client_grpc) + #events_collector.start() + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + POLICY_ID = 'no-uuid' + DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}} + + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID)) + + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 0 + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=1) + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 2 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 1 + assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 1 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=2) + + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + + +# ----- Test REST API methods ------------------------------------------------------------------------------------------ + +def test_rest_populate_database( + context_db_mb : Tuple[Database, MessageBroker], # pylint: disable=redefined-outer-name + context_service_grpc : ContextService # pylint: disable=redefined-outer-name + ): + database = context_db_mb[0] + database.clear_all() + populate(LOCAL_HOST, GRPC_PORT) + +def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/context_ids') + validate_context_ids(reply) + +def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/contexts') + validate_contexts(reply) + +def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}'.format(context_uuid)) + validate_context(reply) + +def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid)) + validate_topology_ids(reply) + +def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid)) + validate_topologies(reply) + +def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID) + reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid)) + validate_topology(reply, num_devices=3, num_links=3) + +def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid)) + validate_service_ids(reply) + +def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/services'.format(context_uuid)) + validate_services(reply) + +def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='') + reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) + validate_service(reply) + +def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid)) + #validate_slice_ids(reply) + +def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slices'.format(context_uuid)) + #validate_slices(reply) + +#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name +# context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) +# slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='') +# reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid)) +# #validate_slice(reply) + +def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/device_ids') + validate_device_ids(reply) + +def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/devices') + validate_devices(reply) + +def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='') + reply = do_rest_request('/device/{:s}'.format(device_uuid)) + validate_device(reply) + +def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/link_ids') + validate_link_ids(reply) + +def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/links') + validate_links(reply) + +def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='') + reply = do_rest_request('/link/{:s}'.format(link_uuid)) + validate_link(reply) + +def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') + reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid)) + validate_connection_ids(reply) + +def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') + reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid)) + validate_connections(reply) + +def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='') + reply = do_rest_request('/connection/{:s}'.format(connection_uuid)) + validate_connection(reply) + +def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrule_ids') + #validate_policyrule_ids(reply) + +def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrules') + #validate_policyrules(reply) + +#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name +# policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='') +# reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid)) +# #validate_policyrule(reply) + + +# ----- Test misc. Context internal tools ------------------------------------------------------------------------------ + +def test_tools_fast_string_hasher(): + with pytest.raises(TypeError) as e: + fast_hasher(27) + assert str(e.value) == "data(27) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'int'>" + + with pytest.raises(TypeError) as e: + fast_hasher({27}) + assert str(e.value) == "data({27}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" + + with pytest.raises(TypeError) as e: + fast_hasher({'27'}) + assert str(e.value) == "data({'27'}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" + + with pytest.raises(TypeError) as e: + fast_hasher([27]) + assert str(e.value) == "data[0](27) must be " + FASTHASHER_ITEM_ACCEPTED_FORMAT + ", found <class 'int'>" + + fast_hasher('hello-world') + fast_hasher('hello-world'.encode('UTF-8')) + fast_hasher(['hello', 'world']) + fast_hasher(('hello', 'world')) + fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')]) + fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8'))) +""" \ No newline at end of file diff --git a/src/context/service/database/Base.py b/src/context/service/database/Base.py deleted file mode 100644 index c64447da1..000000000 --- a/src/context/service/database/Base.py +++ /dev/null @@ -1,2 +0,0 @@ -from sqlalchemy.ext.declarative import declarative_base -Base = declarative_base() diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py index 0de91c2df..5f7111981 100644 --- a/src/context/service/database/ConfigModel.py +++ b/src/context/service/database/ConfigModel.py @@ -19,7 +19,7 @@ from common.proto.context_pb2 import ConfigActionEnum from common.tools.grpc.Tools import grpc_message_to_json_string from sqlalchemy import Column, ForeignKey, INTEGER, CheckConstraint, Enum, String from sqlalchemy.dialects.postgresql import UUID, ARRAY -from context.service.database.Base import Base +from context.service.database._Base import Base from sqlalchemy.orm import relationship from context.service.Database import Database diff --git a/src/context/service/database/ConnectionModel.py b/src/context/service/database/ConnectionModel.py index 1147f3859..e780ccb68 100644 --- a/src/context/service/database/ConnectionModel.py +++ b/src/context/service/database/ConnectionModel.py @@ -36,7 +36,7 @@ from .ConstraintModel import ConstraintsModel from .ContextModel import ContextModel from .Tools import grpc_to_enum from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base +from context.service.database._Base import Base import enum LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/ConstraintModel.py b/src/context/service/database/ConstraintModel.py index cf3b5f0d7..30d900300 100644 --- a/src/context/service/database/ConstraintModel.py +++ b/src/context/service/database/ConstraintModel.py @@ -22,7 +22,7 @@ from .EndPointModel import EndPointModel from .Tools import fast_hasher, remove_dict_key from sqlalchemy import Column, ForeignKey, String, Float, CheckConstraint, Integer, Boolean, Enum from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base +from context.service.database._Base import Base import enum LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index cde774fe4..46f0741e5 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -13,29 +13,27 @@ # limitations under the License. import logging -from typing import Dict, List -from sqlalchemy import Column +from typing import Dict +from sqlalchemy import Column, String from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base -from sqlalchemy.orm import relationship - +from ._Base import _Base +#from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) - -class ContextModel(Base): - __tablename__ = 'Context' +class ContextModel(_Base): + __tablename__ = 'context' context_uuid = Column(UUID(as_uuid=False), primary_key=True) + context_name = Column(String(), nullable=False) - # Relationships - topology = relationship("TopologyModel", back_populates="context") + #topology = relationship('TopologyModel', back_populates='context') def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} - @staticmethod - def main_pk_name(): - return 'context_uuid' + #@staticmethod + #def main_pk_name(): + # return 'context_uuid' """ def dump_service_ids(self) -> List[Dict]: @@ -50,8 +48,7 @@ class ContextModel(Base): """ def dump(self, include_services=True, include_topologies=True) -> Dict: # pylint: disable=arguments-differ - result = {'context_id': self.dump_id()} + result = {'context_id': self.dump_id(), 'name': self.context_name} # if include_services: result['service_ids'] = self.dump_service_ids() # if include_topologies: result['topology_ids'] = self.dump_topology_ids() return result - diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py index cb4517e68..cb568e123 100644 --- a/src/context/service/database/DeviceModel.py +++ b/src/context/service/database/DeviceModel.py @@ -20,7 +20,7 @@ from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum from sqlalchemy import Column, ForeignKey, String, Enum from sqlalchemy.dialects.postgresql import UUID, ARRAY -from context.service.database.Base import Base +from context.service.database._Base import Base from sqlalchemy.orm import relationship from .Tools import grpc_to_enum diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py index 540453970..38214aa9b 100644 --- a/src/context/service/database/EndPointModel.py +++ b/src/context/service/database/EndPointModel.py @@ -21,7 +21,7 @@ from common.proto.context_pb2 import EndPointId from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type from sqlalchemy import Column, ForeignKey, String, Enum, ForeignKeyConstraint from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base +from context.service.database._Base import Base from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/LinkModel.py b/src/context/service/database/LinkModel.py index 025709dfd..6b768d1b7 100644 --- a/src/context/service/database/LinkModel.py +++ b/src/context/service/database/LinkModel.py @@ -16,7 +16,7 @@ import logging, operator from typing import Dict, List from sqlalchemy import Column, ForeignKey from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base +from context.service.database._Base import Base from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/RelationModels.py b/src/context/service/database/RelationModels.py index e69feadc4..61e05db0e 100644 --- a/src/context/service/database/RelationModels.py +++ b/src/context/service/database/RelationModels.py @@ -15,7 +15,7 @@ import logging from sqlalchemy import Column, ForeignKey from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base +from context.service.database._Base import Base LOGGER = logging.getLogger(__name__) # diff --git a/src/context/service/database/ServiceModel.py b/src/context/service/database/ServiceModel.py index 8f358be52..20e10ddd5 100644 --- a/src/context/service/database/ServiceModel.py +++ b/src/context/service/database/ServiceModel.py @@ -22,7 +22,7 @@ from .ConstraintModel import ConstraintsModel from .ContextModel import ContextModel from .Tools import grpc_to_enum from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base +from context.service.database._Base import Base import enum LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py index 063a1f511..0a5698163 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/TopologyModel.py @@ -17,7 +17,7 @@ from typing import Dict, List from sqlalchemy.orm import relationship from sqlalchemy import Column, ForeignKey from sqlalchemy.dialects.postgresql import UUID -from context.service.database.Base import Base +from context.service.database._Base import Base LOGGER = logging.getLogger(__name__) class TopologyModel(Base): diff --git a/src/context/service/database/_Base.py b/src/context/service/database/_Base.py new file mode 100644 index 000000000..49269be08 --- /dev/null +++ b/src/context/service/database/_Base.py @@ -0,0 +1,22 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sqlalchemy +from sqlalchemy.orm import declarative_base + +_Base = declarative_base() + +def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False): + if drop_if_exists: _Base.metadata.drop_all(db_engine) + _Base.metadata.create_all(db_engine) diff --git a/src/context/service/database/__init__.py b/src/context/service/database/__init__.py index 70a332512..27b5f5dd2 100644 --- a/src/context/service/database/__init__.py +++ b/src/context/service/database/__init__.py @@ -12,3 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. +from ._Base import _Base, rebuild_database diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py deleted file mode 100644 index 4d7f06463..000000000 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ /dev/null @@ -1,1213 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import uuid - -import grpc, json, logging, operator, threading -from typing import Iterator, List, Set, Tuple, Union -from common.message_broker.MessageBroker import MessageBroker -from context.service.Database import Database -from common.tools.grpc.Tools import grpc_message_to_json_string - -from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, - Context, ContextEvent, ContextId, ContextIdList, ContextList, - Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList, - Empty, EventTypeEnum, - Link, LinkEvent, LinkId, LinkIdList, LinkList, - Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList, - Slice, SliceEvent, SliceId, SliceIdList, SliceList, - Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList, - ConfigActionEnum, Constraint) -from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) -from common.proto.context_pb2_grpc import ContextServiceServicer -from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException -from sqlalchemy.orm import Session, contains_eager, selectinload -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException -from context.service.database.ConfigModel import grpc_config_rules_to_raw -from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel -from context.service.database.ConfigModel import ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel - -from common.orm.backend.Tools import key_to_str - -from ..database.KpiSampleType import grpc_to_enum__kpi_sample_type - -""" -from context.service.database.ConnectionModel import ConnectionModel, set_path -from context.service.database.ConstraintModel import set_constraints -from common.tools.grpc.Tools import grpc_message_to_json -from context.service.database.ConfigModel import update_config -from context.service.database.ConnectionModel import ConnectionModel, set_path -from context.service.database.ConstraintModel import set_constraints -from context.service.database.ContextModel import ContextModel -from context.service.database.PolicyRuleModel import PolicyRuleModel -from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers -from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types -from context.service.database.Events import notify_event -from context.service.database.RelationModels import ( - ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, - SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel) -from context.service.database.ServiceModel import ( - ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) -from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status -from context.service.database.TopologyModel import TopologyModel -""" -from context.service.database.ContextModel import ContextModel -from context.service.database.TopologyModel import TopologyModel -from context.service.database.Events import notify_event -from context.service.database.EndPointModel import EndPointModel -from context.service.database.EndPointModel import KpiSampleTypeModel -from context.service.database.LinkModel import LinkModel -from context.service.database.ServiceModel import ServiceModel -from context.service.database.ConstraintModel import ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS -from context.service.database.RelationModels import (TopologyDeviceModel, TopologyLinkModel, LinkEndPointModel) - -from .Constants import ( - CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, - TOPIC_TOPOLOGY) - -LOGGER = logging.getLogger(__name__) - -SERVICE_NAME = 'Context' -METHOD_NAMES = [ - 'ListConnectionIds', 'ListConnections', 'GetConnection', 'SetConnection', 'RemoveConnection', 'GetConnectionEvents', - 'ListContextIds', 'ListContexts', 'GetContext', 'SetContext', 'RemoveContext', 'GetContextEvents', - 'ListTopologyIds', 'ListTopologies', 'GetTopology', 'SetTopology', 'RemoveTopology', 'GetTopologyEvents', - 'ListDeviceIds', 'ListDevices', 'GetDevice', 'SetDevice', 'RemoveDevice', 'GetDeviceEvents', - 'ListLinkIds', 'ListLinks', 'GetLink', 'SetLink', 'RemoveLink', 'GetLinkEvents', - 'ListServiceIds', 'ListServices', 'GetService', 'SetService', 'RemoveService', 'GetServiceEvents', - 'ListSliceIds', 'ListSlices', 'GetSlice', 'SetSlice', 'RemoveSlice', 'GetSliceEvents', - 'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule', - 'UnsetService', 'UnsetSlice', -] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) - -class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer): - #def __init__(self, session : Session, messagebroker : MessageBroker): - def __init__(self, database : Database, messagebroker : MessageBroker): - LOGGER.debug('Creating Servicer...') - self.lock = threading.Lock() - self.session = session - self.database = Database(session) - self.messagebroker = messagebroker - LOGGER.debug('Servicer Created') - - - # ----- Context ---------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList: - with self.session() as session: - result = session.query(ContextModel).all() - - return ContextIdList(context_ids=[row.dump_id() for row in result]) - - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: - with self.session() as session: - result = session.query(ContextModel).all() - - return ContextList(contexts=[row.dump() for row in result]) - - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: - context_uuid = request.context_uuid.uuid - with self.session() as session: - result = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() - - if not result: - raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - - return Context(**result.dump()) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: - context_uuid = request.context_id.context_uuid.uuid - - for i, topology_id in enumerate(request.topology_ids): - topology_context_uuid = topology_id.context_id.context_uuid.uuid - if topology_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) - - for i, service_id in enumerate(request.service_ids): - service_context_uuid = service_id.context_id.context_uuid.uuid - if service_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) - - context_add = ContextModel(context_uuid=context_uuid) - - updated = True - with self.session() as session: - result = session.query(ContextModel).filter_by(context_uuid=context_uuid).all() - if not result: - updated = False - session.merge(context_add) - session.commit() - - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_context_id = context_add.dump_id() - notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id}) - return ContextId(**context_add.dump_id()) - - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: - context_uuid = request.context_uuid.uuid - - with self.session() as session: - result = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() - if not result: - return Empty() - session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() - session.commit() - event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': result.dump_id()}) - return Empty() - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: - for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): - yield ContextEvent(**json.loads(message.content)) - - - # ----- Topology --------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: - context_uuid = request.context_uuid.uuid - - with self.session() as session: - result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - if not result: - raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - - db_topologies = result.topology - return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: - context_uuid = request.context_uuid.uuid - - with self.session() as session: - result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by( - context_uuid=context_uuid).one_or_none() - if not result: - raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - - db_topologies = result.topology - return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: - topology_uuid = request.topology_uuid.uuid - - result, dump = self.database.get_object(TopologyModel, topology_uuid, True) - with self.session() as session: - devs = None - links = None - - filt = {'topology_uuid': topology_uuid} - topology_devices = session.query(TopologyDeviceModel).filter_by(**filt).all() - if topology_devices: - devs = [] - for td in topology_devices: - filt = {'device_uuid': td.device_uuid} - devs.append(session.query(DeviceModel).filter_by(**filt).one()) - - filt = {'topology_uuid': topology_uuid} - topology_links = session.query(TopologyLinkModel).filter_by(**filt).all() - if topology_links: - links = [] - for tl in topology_links: - filt = {'link_uuid': tl.link_uuid} - links.append(session.query(LinkModel).filter_by(**filt).one()) - - return Topology(**result.dump(devs, links)) - - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId: - context_uuid = request.topology_id.context_id.context_uuid.uuid - topology_uuid = request.topology_id.topology_uuid.uuid - with self.session() as session: - topology_add = TopologyModel(topology_uuid=topology_uuid, context_uuid=context_uuid) - updated = True - db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() - if not db_topology: - updated = False - session.merge(topology_add) - session.commit() - db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() - - for device_id in request.device_ids: - device_uuid = device_id.device_uuid.uuid - td = TopologyDeviceModel(topology_uuid=topology_uuid, device_uuid=device_uuid) - result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(td) - - - for link_id in request.link_ids: - link_uuid = link_id.link_uuid.uuid - db_link = session.query(LinkModel).filter( - LinkModel.link_uuid == link_uuid).one_or_none() - tl = TopologyLinkModel(topology_uuid=topology_uuid, link_uuid=link_uuid) - result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(tl) - - - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_topology_id = db_topology.dump_id() - notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) - return TopologyId(**dict_topology_id) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: - context_uuid = request.context_id.context_uuid.uuid - topology_uuid = request.topology_uuid.uuid - - with self.session() as session: - result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).one_or_none() - if not result: - return Empty() - dict_topology_id = result.dump_id() - - session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).delete() - session.commit() - event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) - return Empty() - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: - for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): - yield TopologyEvent(**json.loads(message.content)) - - - # ----- Device ----------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList: - with self.session() as session: - result = session.query(DeviceModel).all() - return DeviceIdList(device_ids=[device.dump_id() for device in result]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList: - with self.session() as session: - result = session.query(DeviceModel).all() - return DeviceList(devices=[device.dump() for device in result]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device: - device_uuid = request.device_uuid.uuid - with self.session() as session: - result = session.query(DeviceModel).filter(DeviceModel.device_uuid == device_uuid).one_or_none() - if not result: - raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) - - rd = result.dump(include_config_rules=True, include_drivers=True, include_endpoints=True) - - rt = Device(**rd) - - return rt - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId: - with self.session() as session: - device_uuid = request.device_id.device_uuid.uuid - - for i, endpoint in enumerate(request.device_endpoints): - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: - endpoint_device_uuid = device_uuid - if device_uuid != endpoint_device_uuid: - raise InvalidArgumentException( - 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, - ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) - - config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) - running_config_result = self.update_config(session, device_uuid, 'device', config_rules) - db_running_config = running_config_result[0][0] - config_uuid = db_running_config.config_uuid - running_config_rules = update_config( - self.database, device_uuid, 'device', request.device_config.config_rules) - db_running_config = running_config_rules[0][0] - - new_obj = DeviceModel(**{ - 'device_uuid' : device_uuid, - 'device_type' : request.device_type, - 'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status), - 'device_config_uuid' : config_uuid, - }) - result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj) - db_device, updated = result - - self.set_drivers(db_device, request.device_drivers) - - for i, endpoint in enumerate(request.device_endpoints): - endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid - # endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - # if len(endpoint_device_uuid) == 0: - # endpoint_device_uuid = device_uuid - - endpoint_attributes = { - 'device_uuid' : db_device.device_uuid, - 'endpoint_uuid': endpoint_uuid, - 'endpoint_type': endpoint.endpoint_type, - } - - endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - # str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - - db_topology, topo_dump = self.database.get_object(TopologyModel, endpoint_topology_uuid) - - topology_device = TopologyDeviceModel( - topology_uuid=endpoint_topology_uuid, - device_uuid=db_device.device_uuid) - self.database.create_or_update(topology_device) - - endpoint_attributes['topology_uuid'] = db_topology.topology_uuid - result : Tuple[EndPointModel, bool] = update_or_create_object( - self.database, EndPointModel, str_endpoint_key, endpoint_attributes) - db_endpoint, endpoint_updated = result # pylint: disable=unused-variable - - new_endpoint = EndPointModel(**endpoint_attributes) - result: Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) - db_endpoint, updated = result - - self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types) - - # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_device_id = db_device.dump_id() - # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) - - return DeviceId(**dict_device_id) - - def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types): - db_endpoint_pk = db_endpoint.endpoint_uuid - for kpi_sample_type in grpc_endpoint_kpi_sample_types: - orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) - # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name]) - data = {'endpoint_uuid': db_endpoint_pk, - 'kpi_sample_type': orm_kpi_sample_type.name, - 'kpi_uuid': str(uuid.uuid4())} - db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data) - self.database.create(db_endpoint_kpi_sample_type) - - def set_drivers(self, db_device: DeviceModel, grpc_device_drivers): - db_device_pk = db_device.device_uuid - for driver in grpc_device_drivers: - orm_driver = grpc_to_enum__device_driver(driver) - str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) - driver_config = { - # "driver_uuid": str(uuid.uuid4()), - "device_uuid": db_device_pk, - "driver": orm_driver.name - } - db_device_driver = DriverModel(**driver_config) - db_device_driver.device_fk = db_device - db_device_driver.driver = orm_driver - - self.database.create_or_update(db_device_driver) - - def update_config( - self, session, db_parent_pk: str, config_name: str, - raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]] - ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: - - created = False - - db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none() - if not db_config: - db_config = ConfigModel() - setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk) - session.add(db_config) - session.commit() - created = True - - LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump())) - - db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)] - - for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): - if action == ORM_ConfigActionEnum.SET: - result : Tuple[ConfigRuleModel, bool] = self.set_config_rule( - db_config, position, resource_key, resource_value) - db_config_rule, updated = result - db_objects.append((db_config_rule, updated)) - elif action == ORM_ConfigActionEnum.DELETE: - self.delete_config_rule(db_config, resource_key) - else: - msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' - raise AttributeError( - msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) - - return db_objects - - def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str, - ): # -> Tuple[ConfigRuleModel, bool]: - - from src.context.service.database.Tools import fast_hasher - str_rule_key_hash = fast_hasher(resource_key) - str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') - pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key)) - data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position, - 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value} - to_add = ConfigRuleModel(**data) - - result, updated = self.database.create_or_update(to_add) - return result, updated - - def delete_config_rule( - self, db_config: ConfigModel, resource_key: str - ) -> None: - - from src.context.service.database.Tools import fast_hasher - str_rule_key_hash = fast_hasher(resource_key) - str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') - - db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) - - if db_config_rule is None: - return - db_config_rule.delete() - - def delete_all_config_rules(self, db_config: ConfigModel) -> None: - - db_config_rule_pks = db_config.references(ConfigRuleModel) - for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() - - """ - for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): - if action == ORM_ConfigActionEnum.SET: - result: Tuple[ConfigRuleModel, bool] = set_config_rule( - database, db_config, position, resource_key, resource_value) - db_config_rule, updated = result - db_objects.append((db_config_rule, updated)) - elif action == ORM_ConfigActionEnum.DELETE: - delete_config_rule(database, db_config, resource_key) - else: - msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' - raise AttributeError( - msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) - - return db_objects - """ - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty: - device_uuid = request.device_uuid.uuid - - with self.session() as session: - db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() - - session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() - session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() - session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() - - if not db_device: - return Empty() - dict_device_id = db_device.dump_id() - - session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() - session.commit() - event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) - return Empty() - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: - for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): - yield DeviceEvent(**json.loads(message.content)) - - - - - # ----- Link ------------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList: - with self.session() as session: - result = session.query(LinkModel).all() - return LinkIdList(link_ids=[db_link.dump_id() for db_link in result]) - - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList: - with self.session() as session: - link_list = LinkList() - - db_links = session.query(LinkModel).all() - - for db_link in db_links: - link_uuid = db_link.link_uuid - filt = {'link_uuid': link_uuid} - link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() - if link_endpoints: - eps = [] - for lep in link_endpoints: - filt = {'endpoint_uuid': lep.endpoint_uuid} - eps.append(session.query(EndPointModel).filter_by(**filt).one()) - link_list.links.append(Link(**db_link.dump(eps))) - - return link_list - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link: - link_uuid = request.link_uuid.uuid - with self.session() as session: - result = session.query(LinkModel).filter(LinkModel.link_uuid == link_uuid).one_or_none() - if not result: - raise NotFoundException(LinkModel.__name__.replace('Model', ''), link_uuid) - - filt = {'link_uuid': link_uuid} - link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() - if link_endpoints: - eps = [] - for lep in link_endpoints: - filt = {'endpoint_uuid': lep.endpoint_uuid} - eps.append(session.query(EndPointModel).filter_by(**filt).one()) - return Link(**result.dump(eps)) - - rd = result.dump() - rt = Link(**rd) - - return rt - - - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId: - link_uuid = request.link_id.link_uuid.uuid - - new_link = LinkModel(**{ - 'link_uuid': link_uuid - }) - result: Tuple[LinkModel, bool] = self.database.create_or_update(new_link) - db_link, updated = result - - for endpoint_id in request.link_endpoint_ids: - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - - - db_topology = None - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) - # check device is in topology - self.database.get_object(TopologyDeviceModel, endpoint_device_uuid) - - - link_endpoint = LinkEndPointModel(link_uuid=link_uuid, endpoint_uuid=endpoint_uuid) - result: Tuple[LinkEndPointModel, bool] = self.database.create_or_update(link_endpoint) - - if db_topology is not None: - topology_link = TopologyLinkModel(topology_uuid=endpoint_topology_uuid, link_uuid=link_uuid) - result: Tuple[TopologyLinkModel, bool] = self.database.create_or_update(topology_link) - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_link_id = db_link.dump_id() - notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) - return LinkId(**dict_link_id) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty: - with self.session() as session: - link_uuid = request.link_uuid.uuid - - session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() - session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() - - result = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() - if not result: - return Empty() - dict_link_id = result.dump_id() - - session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() - session.commit() - event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) - return Empty() - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: - for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): - yield LinkEvent(**json.loads(message.content)) - - - # ----- Service ---------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList: - context_uuid = request.context_uuid.uuid - - with self.session() as session: - db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() - return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: - context_uuid = request.context_uuid.uuid - - with self.session() as session: - db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() - return ServiceList(services=[db_service.dump() for db_service in db_services]) - - - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service: - service_uuid = request.service_uuid.uuid - with self.session() as session: - result = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none() - - if not result: - raise NotFoundException(ServiceModel.__name__.replace('Model', ''), service_uuid) - - return Service(**result.dump()) - - def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int - ) -> Tuple[Union_ConstraintModel, bool]: - with self.session() as session: - - grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) - - parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) - if parser is None: - raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( - grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) - - # create specific constraint - constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) - str_constraint_id = str(uuid.uuid4()) - LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) - # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) - # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') - - # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( - # database, constraint_class, str_constraint_key, constraint_data) - constraint_data[constraint_class.main_pk_name()] = str_constraint_id - db_new_constraint = constraint_class(**constraint_data) - result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) - db_specific_constraint, updated = result - - # create generic constraint - # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) - constraint_data = { - 'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind - } - - db_new_constraint = ConstraintModel(**constraint_data) - result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) - db_constraint, updated = result - - return db_constraint, updated - - def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints - ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: - with self.session() as session: - # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') - # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) - result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() - created = None - if result: - created = True - session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() - db_constraints = ConstraintsModel(constraints_uuid=service_uuid) - session.add(db_constraints) - - db_objects = [(db_constraints, created)] - - for position,grpc_constraint in enumerate(grpc_constraints): - result : Tuple[ConstraintModel, bool] = self.set_constraint( - db_constraints, grpc_constraint, position) - db_constraint, updated = result - db_objects.append((db_constraint, updated)) - - return db_objects - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId: - with self.lock: - with self.session() as session: - - context_uuid = request.service_id.context_id.context_uuid.uuid - # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() - - for i,endpoint_id in enumerate(request.service_endpoint_ids): - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: - raise InvalidArgumentException( - 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), - endpoint_topology_context_uuid, - ['should be == {:s}({:s})'.format( - 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) - - service_uuid = request.service_id.service_uuid.uuid - # str_service_key = key_to_str([context_uuid, service_uuid]) - - constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints) - db_constraints = constraints_result[0][0] - - config_rules = grpc_config_rules_to_raw(request.service_config.config_rules) - running_config_result = update_config(self.database, str_service_key, 'running', config_rules) - db_running_config = running_config_result[0][0] - - result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { - 'context_fk' : db_context, - 'service_uuid' : service_uuid, - 'service_type' : grpc_to_enum__service_type(request.service_type), - 'service_constraints_fk': db_constraints, - 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), - 'service_config_fk' : db_running_config, - }) - db_service, updated = result - - for i,endpoint_id in enumerate(request.service_endpoint_ids): - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - - db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) - - str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') - result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( - self.database, ServiceEndPointModel, str_service_endpoint_key, { - 'service_fk': db_service, 'endpoint_fk': db_endpoint}) - #db_service_endpoint, service_endpoint_created = result - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_service_id = db_service.dump_id() - notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) - return ServiceId(**dict_service_id) - context_uuid = request.service_id.context_id.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - - for i,endpoint_id in enumerate(request.service_endpoint_ids): - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: - raise InvalidArgumentException( - 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), - endpoint_topology_context_uuid, - ['should be == {:s}({:s})'.format( - 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) - - service_uuid = request.service_id.service_uuid.uuid - str_service_key = key_to_str([context_uuid, service_uuid]) - - constraints_result = set_constraints( - self.database, str_service_key, 'service', request.service_constraints) - db_constraints = constraints_result[0][0] - - running_config_rules = update_config( - self.database, str_service_key, 'service', request.service_config.config_rules) - db_running_config = running_config_rules[0][0] - - result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { - 'context_fk' : db_context, - 'service_uuid' : service_uuid, - 'service_type' : grpc_to_enum__service_type(request.service_type), - 'service_constraints_fk': db_constraints, - 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), - 'service_config_fk' : db_running_config, - }) - db_service, updated = result - - for i,endpoint_id in enumerate(request.service_endpoint_ids): - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - - db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) - - str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') - result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( - self.database, ServiceEndPointModel, str_service_endpoint_key, { - 'service_fk': db_service, 'endpoint_fk': db_endpoint}) - #db_service_endpoint, service_endpoint_created = result - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_service_id = db_service.dump_id() - notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) - return ServiceId(**dict_service_id) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty: - with self.lock: - context_uuid = request.context_id.context_uuid.uuid - service_uuid = request.service_uuid.uuid - db_service = ServiceModel(self.database, key_to_str([context_uuid, service_uuid]), auto_load=False) - found = db_service.load() - if not found: return Empty() - - dict_service_id = db_service.dump_id() - db_service.delete() - - event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) - return Empty() - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: - for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): - yield ServiceEvent(**json.loads(message.content)) - - - # ----- Slice ---------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList: - with self.lock: - db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) - db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) - db_slices = sorted(db_slices, key=operator.attrgetter('pk')) - return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList: - with self.lock: - db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) - db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) - db_slices = sorted(db_slices, key=operator.attrgetter('pk')) - return SliceList(slices=[db_slice.dump() for db_slice in db_slices]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice: - with self.lock: - str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid]) - db_slice : SliceModel = get_object(self.database, SliceModel, str_key) - return Slice(**db_slice.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True, - include_service_ids=True, include_subslice_ids=True)) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: - with self.lock: - context_uuid = request.slice_id.context_id.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - - for i,endpoint_id in enumerate(request.slice_endpoint_ids): - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: - raise InvalidArgumentException( - 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), - endpoint_topology_context_uuid, - ['should be == {:s}({:s})'.format( - 'request.slice_id.context_id.context_uuid.uuid', context_uuid)]) - - slice_uuid = request.slice_id.slice_uuid.uuid - str_slice_key = key_to_str([context_uuid, slice_uuid]) - - constraints_result = set_constraints( - self.database, str_slice_key, 'slice', request.slice_constraints) - db_constraints = constraints_result[0][0] - - running_config_rules = update_config( - self.database, str_slice_key, 'slice', request.slice_config.config_rules) - db_running_config = running_config_rules[0][0] - - result : Tuple[SliceModel, bool] = update_or_create_object(self.database, SliceModel, str_slice_key, { - 'context_fk' : db_context, - 'slice_uuid' : slice_uuid, - 'slice_constraints_fk': db_constraints, - 'slice_status' : grpc_to_enum__slice_status(request.slice_status.slice_status), - 'slice_config_fk' : db_running_config, - 'slice_owner_uuid' : request.slice_owner.owner_uuid.uuid, - 'slice_owner_string' : request.slice_owner.owner_string, - }) - db_slice, updated = result - - for i,endpoint_id in enumerate(request.slice_endpoint_ids): - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - - db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) - - str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--') - result : Tuple[SliceEndPointModel, bool] = get_or_create_object( - self.database, SliceEndPointModel, str_slice_endpoint_key, { - 'slice_fk': db_slice, 'endpoint_fk': db_endpoint}) - #db_slice_endpoint, slice_endpoint_created = result - - for i,service_id in enumerate(request.slice_service_ids): - service_uuid = service_id.service_uuid.uuid - service_context_uuid = service_id.context_id.context_uuid.uuid - str_service_key = key_to_str([service_context_uuid, service_uuid]) - db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key) - - str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--') - result : Tuple[SliceServiceModel, bool] = get_or_create_object( - self.database, SliceServiceModel, str_slice_service_key, { - 'slice_fk': db_slice, 'service_fk': db_service}) - #db_slice_service, slice_service_created = result - - for i,subslice_id in enumerate(request.slice_subslice_ids): - subslice_uuid = subslice_id.slice_uuid.uuid - subslice_context_uuid = subslice_id.context_id.context_uuid.uuid - str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid]) - db_subslice : SliceModel = get_object(self.database, SliceModel, str_subslice_key) - - str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--') - result : Tuple[SliceSubSliceModel, bool] = get_or_create_object( - self.database, SliceSubSliceModel, str_slice_subslice_key, { - 'slice_fk': db_slice, 'sub_slice_fk': db_subslice}) - #db_slice_subslice, slice_subslice_created = result - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_slice_id = db_slice.dump_id() - notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) - return SliceId(**dict_slice_id) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def UnsetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: - with self.lock: - context_uuid = request.slice_id.context_id.context_uuid.uuid - db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) - - for i,endpoint_id in enumerate(request.slice_endpoint_ids): - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: - raise InvalidArgumentException( - 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), - endpoint_topology_context_uuid, - ['should be == {:s}({:s})'.format( - 'request.slice_id.context_id.context_uuid.uuid', context_uuid)]) - - slice_uuid = request.slice_id.slice_uuid.uuid - str_slice_key = key_to_str([context_uuid, slice_uuid]) - - if len(request.slice_constraints) > 0: - raise NotImplementedError('UnsetSlice: removal of constraints') - if len(request.slice_config.config_rules) > 0: - raise NotImplementedError('UnsetSlice: removal of config rules') - if len(request.slice_endpoint_ids) > 0: - raise NotImplementedError('UnsetSlice: removal of endpoints') - - updated = False - - for service_id in request.slice_service_ids: - service_uuid = service_id.service_uuid.uuid - service_context_uuid = service_id.context_id.context_uuid.uuid - str_service_key = key_to_str([service_context_uuid, service_uuid]) - str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--') - SliceServiceModel(self.database, str_slice_service_key).delete() - updated = True - - for subslice_id in request.slice_subslice_ids: - subslice_uuid = subslice_id.slice_uuid.uuid - subslice_context_uuid = subslice_id.context_id.context_uuid.uuid - str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid]) - str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--') - SliceSubSliceModel(self.database, str_slice_subslice_key).delete() - updated = True - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - db_slice : SliceModel = get_object(self.database, SliceModel, str_slice_key) - dict_slice_id = db_slice.dump_id() - notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) - return SliceId(**dict_slice_id) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty: - with self.lock: - context_uuid = request.context_id.context_uuid.uuid - slice_uuid = request.slice_uuid.uuid - db_slice = SliceModel(self.database, key_to_str([context_uuid, slice_uuid]), auto_load=False) - found = db_slice.load() - if not found: return Empty() - - dict_slice_id = db_slice.dump_id() - db_slice.delete() - - event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) - return Empty() - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: - for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT): - yield SliceEvent(**json.loads(message.content)) - - - # ----- Connection ------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: - with self.session() as session: - result = session.query(DeviceModel).all() - return DeviceIdList(device_ids=[device.dump_id() for device in result]) - - with self.lock: - str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) - db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) - db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) - db_connections = sorted(db_connections, key=operator.attrgetter('pk')) - return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListConnections(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: - with self.lock: - str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) - db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) - db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) - db_connections = sorted(db_connections, key=operator.attrgetter('pk')) - return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection: - with self.lock: - db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid) - return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True)) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId: - with self.lock: - connection_uuid = request.connection_id.connection_uuid.uuid - - connection_attributes = {'connection_uuid': connection_uuid} - - service_context_uuid = request.service_id.context_id.context_uuid.uuid - service_uuid = request.service_id.service_uuid.uuid - if len(service_context_uuid) > 0 and len(service_uuid) > 0: - str_service_key = key_to_str([service_context_uuid, service_uuid]) - db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key) - connection_attributes['service_fk'] = db_service - - path_hops_result = set_path(self.database, connection_uuid, request.path_hops_endpoint_ids, path_name = '') - db_path = path_hops_result[0] - connection_attributes['path_fk'] = db_path - - result : Tuple[ConnectionModel, bool] = update_or_create_object( - self.database, ConnectionModel, connection_uuid, connection_attributes) - db_connection, updated = result - - for sub_service_id in request.sub_service_ids: - sub_service_uuid = sub_service_id.service_uuid.uuid - sub_service_context_uuid = sub_service_id.context_id.context_uuid.uuid - str_sub_service_key = key_to_str([sub_service_context_uuid, sub_service_uuid]) - db_service : ServiceModel = get_object(self.database, ServiceModel, str_sub_service_key) - - str_connection_sub_service_key = key_to_str([connection_uuid, str_sub_service_key], separator='--') - result : Tuple[ConnectionSubServiceModel, bool] = get_or_create_object( - self.database, ConnectionSubServiceModel, str_connection_sub_service_key, { - 'connection_fk': db_connection, 'sub_service_fk': db_service}) - #db_connection_sub_service, connection_sub_service_created = result - - event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_connection_id = db_connection.dump_id() - notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) - return ConnectionId(**dict_connection_id) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty: - with self.lock: - db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False) - found = db_connection.load() - if not found: return Empty() - - dict_connection_id = db_connection.dump_id() - db_connection.delete() - - event_type = EventTypeEnum.EVENTTYPE_REMOVE - notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) - return Empty() - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: - for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): - yield ConnectionEvent(**json.loads(message.content)) - - - # ----- Policy ----------------------------------------------------------------------------------------------------- - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: - with self.lock: - db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) - db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) - return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList: - with self.lock: - db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) - db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) - return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules]) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: - with self.lock: - policy_rule_uuid = request.uuid.uuid - db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid) - return PolicyRule(**db_policy_rule.dump()) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: - with self.lock: - policy_rule_type = request.WhichOneof('policy_rule') - policy_rule_json = grpc_message_to_json(request) - policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid'] - result: Tuple[PolicyRuleModel, bool] = update_or_create_object( - self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)}) - db_policy, updated = result # pylint: disable=unused-variable - - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - dict_policy_id = db_policy.dump_id() - #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) - return PolicyRuleId(**dict_policy_id) - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty: - with self.lock: - policy_uuid = request.uuid.uuid - db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False) - found = db_policy.load() - if not found: return Empty() - - dict_policy_id = db_policy.dump_id() - db_policy.delete() - #event_type = EventTypeEnum.EVENTTYPE_REMOVE - #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) - return Empty() diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index 67dd64fb3..aaa8c7fbd 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -37,13 +37,13 @@ from context.client.EventsCollector import EventsCollector from context.service.database.Tools import ( FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher) from context.service.grpc_server.ContextService import ContextService -from context.service.Populate import populate +from context.service._old_code.Populate import populate from context.service.rest_server.RestServer import RestServer from context.service.rest_server.Resources import RESOURCES from requests import Session from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker -from context.service.database.Base import Base +from context.service.database._Base import Base from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, @@ -1294,134 +1294,6 @@ def test_grpc_policy( -# ----- Test REST API methods ------------------------------------------------------------------------------------------ - -def test_rest_populate_database( - context_db_mb : Tuple[Database, MessageBroker], # pylint: disable=redefined-outer-name - context_service_grpc : ContextService # pylint: disable=redefined-outer-name - ): - database = context_db_mb[0] - database.clear_all() - populate(LOCAL_HOST, GRPC_PORT) - -def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/context_ids') - validate_context_ids(reply) - -def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/contexts') - validate_contexts(reply) - -def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}'.format(context_uuid)) - validate_context(reply) - -def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid)) - validate_topology_ids(reply) - -def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid)) - validate_topologies(reply) - -def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID) - reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid)) - validate_topology(reply, num_devices=3, num_links=3) - -def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid)) - validate_service_ids(reply) - -def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/services'.format(context_uuid)) - validate_services(reply) - -def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='') - reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) - validate_service(reply) - -def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid)) - #validate_slice_ids(reply) - -def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/slices'.format(context_uuid)) - #validate_slices(reply) - -#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -# context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) -# slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='') -# reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid)) -# #validate_slice(reply) - -def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/device_ids') - validate_device_ids(reply) - -def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/devices') - validate_devices(reply) - -def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='') - reply = do_rest_request('/device/{:s}'.format(device_uuid)) - validate_device(reply) - -def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/link_ids') - validate_link_ids(reply) - -def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/links') - validate_links(reply) - -def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='') - reply = do_rest_request('/link/{:s}'.format(link_uuid)) - validate_link(reply) - -def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') - reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid)) - validate_connection_ids(reply) - -def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') - reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid)) - validate_connections(reply) - -def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='') - reply = do_rest_request('/connection/{:s}'.format(connection_uuid)) - validate_connection(reply) - -def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/policyrule_ids') - #validate_policyrule_ids(reply) - -def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/policyrules') - #validate_policyrules(reply) - -#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -# policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='') -# reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid)) -# #validate_policyrule(reply) - - # ----- Test misc. Context internal tools ------------------------------------------------------------------------------ def test_tools_fast_string_hasher(): -- GitLab From 817f5f08825e4999257b85823e6fac8af5dafa17 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 16 Dec 2022 00:04:53 +0100 Subject: [PATCH 013/158] Proto: - added field name in context - added field name in topology - added field name in device - added field name in link - added field name in service - added field name in slice --- proto/context.proto | 73 +++++++++++---------- src/common/Constants.py | 13 ++-- src/common/tools/object_factory/Context.py | 9 ++- src/common/tools/object_factory/Topology.py | 6 +- 4 files changed, 58 insertions(+), 43 deletions(-) diff --git a/proto/context.proto b/proto/context.proto index 3f0532d23..db0c81381 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -101,9 +101,11 @@ message ContextId { message Context { ContextId context_id = 1; - repeated TopologyId topology_ids = 2; - repeated ServiceId service_ids = 3; - TeraFlowController controller = 4; + string name = 2; + repeated TopologyId topology_ids = 3; + repeated ServiceId service_ids = 4; + repeated SliceId slice_ids = 5; + TeraFlowController controller = 6; } message ContextIdList { @@ -128,8 +130,9 @@ message TopologyId { message Topology { TopologyId topology_id = 1; - repeated DeviceId device_ids = 2; - repeated LinkId link_ids = 3; + string name = 2; + repeated DeviceId device_ids = 3; + repeated LinkId link_ids = 4; } message TopologyIdList { @@ -153,12 +156,13 @@ message DeviceId { message Device { DeviceId device_id = 1; - string device_type = 2; - DeviceConfig device_config = 3; - DeviceOperationalStatusEnum device_operational_status = 4; - repeated DeviceDriverEnum device_drivers = 5; - repeated EndPoint device_endpoints = 6; - repeated Component component = 7; // Used for inventory + string name = 2; + string device_type = 3; + DeviceConfig device_config = 4; + DeviceOperationalStatusEnum device_operational_status = 5; + repeated DeviceDriverEnum device_drivers = 6; + repeated EndPoint device_endpoints = 7; + repeated Component component = 8; // Used for inventory } message Component { @@ -207,7 +211,8 @@ message LinkId { message Link { LinkId link_id = 1; - repeated EndPointId link_endpoint_ids = 2; + string name = 2; + repeated EndPointId link_endpoint_ids = 3; } message LinkIdList { @@ -232,12 +237,13 @@ message ServiceId { message Service { ServiceId service_id = 1; - ServiceTypeEnum service_type = 2; - repeated EndPointId service_endpoint_ids = 3; - repeated Constraint service_constraints = 4; - ServiceStatus service_status = 5; - ServiceConfig service_config = 6; - Timestamp timestamp = 7; + string name = 2; + ServiceTypeEnum service_type = 3; + repeated EndPointId service_endpoint_ids = 4; + repeated Constraint service_constraints = 5; + ServiceStatus service_status = 6; + ServiceConfig service_config = 7; + Timestamp timestamp = 8; } enum ServiceTypeEnum { @@ -284,14 +290,15 @@ message SliceId { message Slice { SliceId slice_id = 1; - repeated EndPointId slice_endpoint_ids = 2; - repeated Constraint slice_constraints = 3; - repeated ServiceId slice_service_ids = 4; - repeated SliceId slice_subslice_ids = 5; - SliceStatus slice_status = 6; - SliceConfig slice_config = 7; - SliceOwner slice_owner = 8; - Timestamp timestamp = 9; + string name = 2; + repeated EndPointId slice_endpoint_ids = 3; + repeated Constraint slice_constraints = 4; + repeated ServiceId slice_service_ids = 5; + repeated SliceId slice_subslice_ids = 6; + SliceStatus slice_status = 7; + SliceConfig slice_config = 8; + SliceOwner slice_owner = 9; + Timestamp timestamp = 10; } message SliceOwner { @@ -300,11 +307,11 @@ message SliceOwner { } enum SliceStatusEnum { - SLICESTATUS_UNDEFINED = 0; - SLICESTATUS_PLANNED = 1; - SLICESTATUS_INIT = 2; - SLICESTATUS_ACTIVE = 3; - SLICESTATUS_DEINIT = 4; + SLICESTATUS_UNDEFINED = 0; + SLICESTATUS_PLANNED = 1; + SLICESTATUS_INIT = 2; + SLICESTATUS_ACTIVE = 3; + SLICESTATUS_DEINIT = 4; SLICESTATUS_SLA_VIOLATED = 5; } @@ -409,8 +416,8 @@ message EndPoint { // ----- Configuration ------------------------------------------------------------------------------------------------- enum ConfigActionEnum { CONFIGACTION_UNDEFINED = 0; - CONFIGACTION_SET = 1; - CONFIGACTION_DELETE = 2; + CONFIGACTION_SET = 1; + CONFIGACTION_DELETE = 2; } message ConfigRule_Custom { diff --git a/src/common/Constants.py b/src/common/Constants.py index 5558ef25d..d606c0d03 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import logging, uuid from enum import Enum # Default logging level @@ -30,11 +30,12 @@ DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0' DEFAULT_METRICS_PORT = 9192 # Default context and topology UUIDs -#DEFAULT_CONTEXT_UUID = '85f78267-4c5e-4f80-ad2f-7fbaca7c62a0' -#DEFAULT_TOPOLOGY_UUID = '85f78267-4c5e-4f80-ad2f-7fbaca7c62a0' -DEFAULT_CONTEXT_UUID = 'admin' -DEFAULT_TOPOLOGY_UUID = 'admin' # contains the detailed local topology -INTERDOMAIN_TOPOLOGY_UUID = 'inter' # contains the abstract inter-domain topology +DEFAULT_CONTEXT_NAME = 'admin' +DEFAULT_TOPOLOGY_NAME = 'admin' # contains the detailed local topology +INTERDOMAIN_TOPOLOGY_NAME = 'inter' # contains the abstract inter-domain topology +DEFAULT_CONTEXT_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_CONTEXT_NAME )) +DEFAULT_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_TOPOLOGY_NAME )) +INTERDOMAIN_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, INTERDOMAIN_TOPOLOGY_NAME)) # Default service names class ServiceNameEnum(Enum): diff --git a/src/common/tools/object_factory/Context.py b/src/common/tools/object_factory/Context.py index d5d1bf943..58f35b929 100644 --- a/src/common/tools/object_factory/Context.py +++ b/src/common/tools/object_factory/Context.py @@ -12,12 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional + def json_context_id(context_uuid : str): return {'context_uuid': {'uuid': context_uuid}} -def json_context(context_uuid : str): - return { +def json_context(context_uuid : str, name : Optional[str] = None): + result = { 'context_id' : json_context_id(context_uuid), 'topology_ids': [], 'service_ids' : [], + 'slice_ids' : [], } + if name is not None: result['name'] = name + return result diff --git a/src/common/tools/object_factory/Topology.py b/src/common/tools/object_factory/Topology.py index 7de4a1d57..5f7a42d7a 100644 --- a/src/common/tools/object_factory/Topology.py +++ b/src/common/tools/object_factory/Topology.py @@ -20,9 +20,11 @@ def json_topology_id(topology_uuid : str, context_id : Optional[Dict] = None): if context_id is not None: result['context_id'] = copy.deepcopy(context_id) return result -def json_topology(topology_uuid : str, context_id : Optional[Dict] = None): - return { +def json_topology(topology_uuid : str, name : Optional[str] = None, context_id : Optional[Dict] = None): + result = { 'topology_id': json_topology_id(topology_uuid, context_id=context_id), 'device_ids' : [], 'link_ids' : [], } + if name is not None: result['name'] = name + return result -- GitLab From 6cf2056a321c1751f24b4383dedff9b15133d56e Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 16 Dec 2022 00:09:13 +0100 Subject: [PATCH 014/158] Context component: - updatd EventsCollector get_events - added field created_at in ContextModel - added ChangeFeedClient - WIP arrangements in unitary tests - WIP arrangements in ServicerImpl - arranged run_tests_locally script --- scripts/run_tests_locally-context.sh | 20 +- src/context/client/EventsCollector.py | 2 +- src/context/service/ChangeFeedClient.py | 87 ++++++ .../service/ContextServiceServicerImpl.py | 111 ++++--- src/context/service/Database.py | 23 +- src/context/service/Engine.py | 8 +- src/context/service/database/ConfigModel.py | 2 +- src/context/service/database/ContextModel.py | 3 +- src/context/service/database/__init__.py | 1 + src/context/tests/test_unitary.py | 293 ++++++++++-------- 10 files changed, 356 insertions(+), 194 deletions(-) create mode 100644 src/context/service/ChangeFeedClient.py diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index 7033fcb01..bf0cccd6b 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -20,7 +20,7 @@ # If not already set, set the name of the Kubernetes namespace to deploy to. export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} -export TFS_K8S_HOSTNAME="tfs-vm" +#export TFS_K8S_HOSTNAME="tfs-vm" ######################################################################################################################## # Automated steps start here @@ -29,15 +29,21 @@ export TFS_K8S_HOSTNAME="tfs-vm" PROJECTDIR=`pwd` cd $PROJECTDIR/src -RCFILE=$PROJECTDIR/coverage/.coveragerc +#RCFILE=$PROJECTDIR/coverage/.coveragerc -kubectl --namespace $TFS_K8S_NAMESPACE expose deployment contextservice --name=redis-tests --port=6379 --type=NodePort +#kubectl --namespace $TFS_K8S_NAMESPACE expose deployment contextservice --name=redis-tests --port=6379 --type=NodePort #export REDIS_SERVICE_HOST=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.clusterIP}') -export REDIS_SERVICE_HOST=$(kubectl get node $TFS_K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export REDIS_SERVICE_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}') +#export REDIS_SERVICE_HOST=$(kubectl get node $TFS_K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +#export REDIS_SERVICE_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}') + +export CRDB_URI="cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs?sslmode=require" # Run unitary tests and analyze coverage of code at same time -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ +#coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ +# context/tests/test_unitary.py + +source tfs_runtime_env_vars.sh +pytest --log-level=INFO --verbose -o log_cli=true --maxfail=1 \ context/tests/test_unitary.py -kubectl --namespace $TFS_K8S_NAMESPACE delete service redis-tests +#kubectl --namespace $TFS_K8S_NAMESPACE delete service redis-tests diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py index f5fc3fbc7..9ad6e101b 100644 --- a/src/context/client/EventsCollector.py +++ b/src/context/client/EventsCollector.py @@ -132,7 +132,7 @@ class EventsCollector: if event is None: break events.append(event) else: - for _ in range(count): + while len(events) < count: if self._terminate.is_set(): break event = self.get_event(block=block, timeout=timeout) if event is None: continue diff --git a/src/context/service/ChangeFeedClient.py b/src/context/service/ChangeFeedClient.py new file mode 100644 index 000000000..8285dc6c3 --- /dev/null +++ b/src/context/service/ChangeFeedClient.py @@ -0,0 +1,87 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pip install psycopg==3.1.6 +# Ref: https://www.cockroachlabs.com/docs/stable/changefeed-for.html +# (current implementation) Ref: https://www.cockroachlabs.com/docs/v22.1/changefeed-for +# Ref: https://www.psycopg.org/psycopg3/docs/api/crdb.html + +import contextlib, json, logging, psycopg, psycopg.conninfo, psycopg.crdb, sys, time +from typing import Any, Dict, Iterator, List, Optional, Tuple +from common.Settings import get_setting + +LOGGER = logging.getLogger(__name__) + +SQL_ACTIVATE_CHANGE_FEED = 'SET CLUSTER SETTING kv.rangefeed.enabled = true' +SQL_START_CHANGE_FEED = 'EXPERIMENTAL CHANGEFEED FOR {:s}.{:s} WITH format=json, no_initial_scan, updated' + +class ChangeFeedClient: + def __init__(self) -> None: + self._connection : Optional[psycopg.crdb.CrdbConnection] = None + self._conn_info_dict : Dict = dict() + self._is_crdb : bool = False + + def initialize(self) -> bool: + crdb_uri = get_setting('CRDB_URI') + if crdb_uri is None: + LOGGER.error('Connection string not found in EnvVar CRDB_URI') + return False + + try: + crdb_uri = crdb_uri.replace('cockroachdb://', 'postgres://') + self._conn_info_dict = psycopg.conninfo.conninfo_to_dict(crdb_uri) + except psycopg.ProgrammingError: + LOGGER.exception('Invalid connection string: {:s}'.format(str(crdb_uri))) + return False + + self._connection = psycopg.crdb.connect(**self._conn_info_dict) + self._is_crdb = psycopg.crdb.CrdbConnection.is_crdb(self._connection) + LOGGER.debug('is_crdb = {:s}'.format(str(self._is_crdb))) + + # disable multi-statement transactions + self._connection.autocommit = True + + # activate change feeds + self._connection.execute(SQL_ACTIVATE_CHANGE_FEED) + + return self._is_crdb + + def get_changes(self, table_name : str) -> Iterator[Tuple[float, str, List[Any], bool, Dict]]: + db_name = self._conn_info_dict.get('dbname') + if db_name is None: raise Exception('ChangeFeed has not been initialized!') + cur = self._connection.cursor() + str_sql_query = SQL_START_CHANGE_FEED.format(db_name, table_name) + with contextlib.closing(cur.stream(str_sql_query)) as feed: + for change in feed: + LOGGER.info(change) + table_name, primary_key, data = change[0], json.loads(change[1]), json.loads(change[2]) + timestamp = data.get('updated') / 1.e9 + if timestamp is None: timestamp = time.time() + after = data.get('after') + is_delete = ('after' in data) and (after is None) + yield timestamp, table_name, primary_key, is_delete, after + +def main(): + logging.basicConfig(level=logging.INFO) + + cf = ChangeFeed() + ready = cf.initialize() + if not ready: raise Exception('Unable to initialize ChangeFeed') + for change in cf.get_changes('context'): + LOGGER.info(change) + + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index b5725f007..fcb0024d2 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -13,13 +13,13 @@ # limitations under the License. -import grpc, json, logging, operator, sqlalchemy, threading, uuid +import grpc, json, logging, operator, sqlalchemy, threading, time, uuid from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker -from sqlalchemy.dialects.postgresql import UUID, insert +#from sqlalchemy.dialects.postgresql import UUID, insert from sqlalchemy_cockroachdb import run_transaction from typing import Dict, Iterator, List, Optional, Set, Tuple, Union from common.message_broker.MessageBroker import MessageBroker -from common.orm.backend.Tools import key_to_str +#from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import ( Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, Context, ContextEvent, ContextId, ContextIdList, ContextList, @@ -30,36 +30,39 @@ from common.proto.context_pb2 import ( Slice, SliceEvent, SliceId, SliceIdList, SliceList, Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList, ConfigActionEnum, Constraint) -from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule +#from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule from common.proto.context_pb2_grpc import ContextServiceServicer from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer +from common.tools.object_factory.Context import json_context_id from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException -from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string -from context.service.Database import Database -from context.service.database.ConfigModel import ( - ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel, grpc_config_rules_to_raw, update_config) -from context.service.database.ConnectionModel import ConnectionModel, set_path -from context.service.database.ConstraintModel import ( - ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS, set_constraints) +from common.rpc_method_wrapper.ServiceExceptions import ( + InvalidArgumentException, NotFoundException, OperationFailedException) +#from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +#from context.service.Database import Database +#from context.service.database.ConfigModel import ( +# ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel, grpc_config_rules_to_raw, update_config) +#from context.service.database.ConnectionModel import ConnectionModel, set_path +#from context.service.database.ConstraintModel import ( +# ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS, set_constraints) from context.service.database.ContextModel import ContextModel -from context.service.database.DeviceModel import ( - DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel) -from context.service.database.EndPointModel import EndPointModel, KpiSampleTypeModel, set_kpi_sample_types -from context.service.database.Events import notify_event -from context.service.database.KpiSampleType import grpc_to_enum__kpi_sample_type -from context.service.database.LinkModel import LinkModel -from context.service.database.PolicyRuleModel import PolicyRuleModel -from context.service.database.RelationModels import ( - ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, - SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel) -from context.service.database.ServiceModel import ( - ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) -from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status -from context.service.database.TopologyModel import TopologyModel -from .Constants import ( - CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, - TOPIC_TOPOLOGY) +#from context.service.database.DeviceModel import ( +# DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel) +#from context.service.database.EndPointModel import EndPointModel, KpiSampleTypeModel, set_kpi_sample_types +#from context.service.database.Events import notify_event +#from context.service.database.KpiSampleType import grpc_to_enum__kpi_sample_type +#from context.service.database.LinkModel import LinkModel +#from context.service.database.PolicyRuleModel import PolicyRuleModel +#from context.service.database.RelationModels import ( +# ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, +# SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel) +#from context.service.database.ServiceModel import ( +# ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) +#from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status +#from context.service.database.TopologyModel import TopologyModel +#from .Constants import ( +# CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, +# TOPIC_TOPOLOGY) +from .ChangeFeedClient import ChangeFeedClient LOGGER = logging.getLogger(__name__) @@ -106,7 +109,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: - context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_uuid.uuid)) + context_uuid = request.context_uuid.uuid def callback(session : Session) -> Optional[Dict]: obj : Optional[ContextModel] = \ session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() @@ -117,8 +120,8 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: - context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_id.context_uuid.uuid)) - context_name = request.context_id.context_uuid.uuid + context_uuid = request.context_id.context_uuid.uuid + context_name = request.name for i, topology_id in enumerate(request.topology_ids): topology_context_uuid = topology_id.context_id.context_uuid.uuid @@ -134,15 +137,24 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + for i, slice_id in enumerate(request.slice_ids): + slice_context_uuid = slice_id.context_id.context_uuid.uuid + if slice_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.slice_ids[{:d}].context_id.context_uuid.uuid'.format(i), slice_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + def callback(session : Session) -> Tuple[Optional[Dict], bool]: obj : Optional[ContextModel] = \ session.query(ContextModel).with_for_update().filter_by(context_uuid=context_uuid).one_or_none() - updated = obj is not None - obj = ContextModel(context_uuid=context_uuid, context_name=context_name) - session.merge(obj) - session.commit() + is_update = obj is not None + if is_update: + obj.context_name = context_name + session.merge(obj) + else: + session.add(ContextModel(context_uuid=context_uuid, context_name=context_name, created_at=time.time())) obj = session.get(ContextModel, {'context_uuid': context_uuid}) - return (None if obj is None else obj.dump_id()), updated + return (None if obj is None else obj.dump_id()), is_update obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) if obj_id is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) @@ -153,7 +165,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: - context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_uuid.uuid)) + context_uuid = request.context_uuid.uuid def callback(session : Session) -> bool: num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() @@ -164,11 +176,24 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # notify_event(self.messagebroker, TOPIC_CONTEXT, EventTypeEnum.EVENTTYPE_REMOVE, {'context_id': request}) return Empty() -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: -# for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): -# yield ContextEvent(**json.loads(message.content)) - + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: + #for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): + # yield ContextEvent(**json.loads(message.content)) + cf = ChangeFeedClient() + ready = cf.initialize() + if not ready: raise OperationFailedException('Initialize ChangeFeed') + for timestamp, _, primary_key, is_delete, after in cf.get_changes('context'): + if is_delete: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + else: + is_create = (timestamp - after.get('created_at')) < 1.0 + event_type = EventTypeEnum.EVENTTYPE_CREATE if is_create else EventTypeEnum.EVENTTYPE_UPDATE + event = { + 'event': {'timestamp': {'timestamp': timestamp}, 'event_type': event_type}, + 'context_id': json_context_id(primary_key[0]), + } + yield ContextEvent(**event) # ----- Topology --------------------------------------------------------------------------------------------------- diff --git a/src/context/service/Database.py b/src/context/service/Database.py index 8aa568239..03598a97f 100644 --- a/src/context/service/Database.py +++ b/src/context/service/Database.py @@ -1,16 +1,13 @@ -from typing import Tuple, List - -from sqlalchemy import MetaData -from sqlalchemy.orm import Session, joinedload -from context.service.database._Base import Base import logging -from common.orm.backend.Tools import key_to_str - +from sqlalchemy import MetaData +from sqlalchemy.orm import Session #, joinedload +from typing import Tuple #, List +from context.service.database._Base import _Base +#from common.orm.backend.Tools import key_to_str from common.rpc_method_wrapper.ServiceExceptions import NotFoundException LOGGER = logging.getLogger(__name__) - class Database(Session): def __init__(self, session): super().__init__() @@ -62,8 +59,8 @@ class Database(Session): def clear(self): with self.session() as session: engine = session.get_bind() - Base.metadata.drop_all(engine) - Base.metadata.create_all(engine) + _Base.metadata.drop_all(engine) + _Base.metadata.create_all(engine) def dump_by_table(self): with self.session() as session: @@ -90,7 +87,7 @@ class Database(Session): return result - def get_object(self, model_class: Base, main_key: str, raise_if_not_found=False): + def get_object(self, model_class: _Base, main_key: str, raise_if_not_found=False): filt = {model_class.main_pk_name(): main_key} with self.session() as session: get = session.query(model_class).filter_by(**filt).one_or_none() @@ -104,7 +101,7 @@ class Database(Session): dump = get.dump() return get, dump - def get_object_filter(self, model_class: Base, filt, raise_if_not_found=False): + def get_object_filter(self, model_class: _Base, filt, raise_if_not_found=False): with self.session() as session: get = session.query(model_class).filter_by(**filt).all() @@ -119,7 +116,7 @@ class Database(Session): return get, get.dump() - def get_or_create(self, model_class: Base, key_parts: str, filt=None) -> Tuple[Base, bool]: + def get_or_create(self, model_class: _Base, key_parts: str, filt=None) -> Tuple[_Base, bool]: if not filt: filt = {model_class.main_pk_name(): key_parts} with self.session() as session: diff --git a/src/context/service/Engine.py b/src/context/service/Engine.py index 7944d8601..08e1e4f93 100644 --- a/src/context/service/Engine.py +++ b/src/context/service/Engine.py @@ -21,20 +21,20 @@ APP_NAME = 'tfs' class Engine: def get_engine(self) -> sqlalchemy.engine.Engine: - ccdb_url = get_setting('CCDB_URL') + crdb_uri = get_setting('CRDB_URI') try: engine = sqlalchemy.create_engine( - ccdb_url, connect_args={'application_name': APP_NAME}, echo=False, future=True) + crdb_uri, connect_args={'application_name': APP_NAME}, echo=False, future=True) except: # pylint: disable=bare-except - LOGGER.exception('Failed to connect to database: {:s}'.format(ccdb_url)) + LOGGER.exception('Failed to connect to database: {:s}'.format(crdb_uri)) return None try: if not sqlalchemy_utils.database_exists(engine.url): sqlalchemy_utils.create_database(engine.url) except: # pylint: disable=bare-except - LOGGER.exception('Failed to check/create to database: {:s}'.format(ccdb_url)) + LOGGER.exception('Failed to check/create to database: {:s}'.format(crdb_uri)) return None return engine diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py index 5f7111981..d36622e76 100644 --- a/src/context/service/database/ConfigModel.py +++ b/src/context/service/database/ConfigModel.py @@ -19,7 +19,7 @@ from common.proto.context_pb2 import ConfigActionEnum from common.tools.grpc.Tools import grpc_message_to_json_string from sqlalchemy import Column, ForeignKey, INTEGER, CheckConstraint, Enum, String from sqlalchemy.dialects.postgresql import UUID, ARRAY -from context.service.database._Base import Base +from context.service.database._Base import _Base from sqlalchemy.orm import relationship from context.service.Database import Database diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index 46f0741e5..9ad5e0bcb 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -14,7 +14,7 @@ import logging from typing import Dict -from sqlalchemy import Column, String +from sqlalchemy import Column, Float, String from sqlalchemy.dialects.postgresql import UUID from ._Base import _Base #from sqlalchemy.orm import relationship @@ -25,6 +25,7 @@ class ContextModel(_Base): __tablename__ = 'context' context_uuid = Column(UUID(as_uuid=False), primary_key=True) context_name = Column(String(), nullable=False) + created_at = Column(Float) #topology = relationship('TopologyModel', back_populates='context') diff --git a/src/context/service/database/__init__.py b/src/context/service/database/__init__.py index 27b5f5dd2..980265786 100644 --- a/src/context/service/database/__init__.py +++ b/src/context/service/database/__init__.py @@ -13,3 +13,4 @@ # limitations under the License. from ._Base import _Base, rebuild_database +from .ContextModel import ContextModel diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index aaa8c7fbd..8bf1b4ff1 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -13,7 +13,7 @@ # limitations under the License. # pylint: disable=too-many-lines -import copy, grpc, logging, os, pytest, requests, time, urllib +import copy, grpc, logging, os, pytest, requests, sqlalchemy, time, urllib, uuid from typing import Tuple from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum from common.Settings import ( @@ -27,6 +27,10 @@ from common.proto.context_pb2 import ( DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Service import json_service_id +from common.tools.object_factory.Slice import json_slice_id +from common.tools.object_factory.Topology import json_topology_id from common.type_checkers.Assertions import ( validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids, validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids, @@ -36,14 +40,17 @@ from context.client.ContextClient import ContextClient from context.client.EventsCollector import EventsCollector from context.service.database.Tools import ( FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher) -from context.service.grpc_server.ContextService import ContextService -from context.service._old_code.Populate import populate -from context.service.rest_server.RestServer import RestServer -from context.service.rest_server.Resources import RESOURCES +from context.service.ContextService import ContextService +#from context.service._old_code.Populate import populate +#from context.service.rest_server.RestServer import RestServer +#from context.service.rest_server.Resources import RESOURCES from requests import Session from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker -from context.service.database._Base import Base +from context.service.database._Base import _Base +from common.Settings import get_setting +from context.service.Engine import Engine +from context.service.database._Base import rebuild_database from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, @@ -63,90 +70,86 @@ os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT) -DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST -DEFAULT_REDIS_SERVICE_PORT = 6379 -DEFAULT_REDIS_DATABASE_ID = 0 +#DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST +#DEFAULT_REDIS_SERVICE_PORT = 6379 +#DEFAULT_REDIS_DATABASE_ID = 0 -REDIS_CONFIG = { - 'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST), - 'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT), - 'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID', DEFAULT_REDIS_DATABASE_ID ), -} +#REDIS_CONFIG = { +# 'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST), +# 'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT), +# 'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID', DEFAULT_REDIS_DATABASE_ID ), +#} -SCENARIOS = [ - ('all_sqlalchemy', {}, MessageBrokerBackendEnum.INMEMORY, {} ), - ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) +#SCENARIOS = [ +# ('db:cockroach_mb:inmemory', None, {}, None, {}), +# ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) # ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), -] +#] -@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) -def context_s_mb(request) -> Tuple[Session, MessageBroker]: - name,db_session,mb_backend,mb_settings = request.param - msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' - LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - - db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable' - LOGGER.debug('Connecting to DB: {}'.format(db_uri)) - - try: - engine = create_engine(db_uri) - except Exception as e: - LOGGER.error("Failed to connect to database.") - LOGGER.error(f"{e}") - return 1 +#@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) +@pytest.fixture(scope='session') +def context_db_mb(request) -> Tuple[Session, MessageBroker]: + #name,db_session,mb_backend,mb_settings = request.param + #msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' + #LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - Base.metadata.create_all(engine) - _session = sessionmaker(bind=engine, expire_on_commit=False) + _db_engine = Engine().get_engine() - _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings)) - yield _session, _message_broker - _message_broker.terminate() + _msg_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) + yield _db_engine, _msg_broker + _msg_broker.terminate() @pytest.fixture(scope='session') -def context_service_grpc(context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - _service = ContextService(context_s_mb[0], context_s_mb[1]) +def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + _service = ContextService(context_db_mb[0], context_db_mb[1]) _service.start() yield _service _service.stop() -@pytest.fixture(scope='session') -def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - database = context_db_mb[0] - _rest_server = RestServer() - for endpoint_name, resource_class, resource_url in RESOURCES: - _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) - _rest_server.start() - time.sleep(1) # bring time for the server to start - yield _rest_server - _rest_server.shutdown() - _rest_server.join() + +#@pytest.fixture(scope='session') +#def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name +# database = context_db_mb[0] +# _rest_server = RestServer() +# for endpoint_name, resource_class, resource_url in RESOURCES: +# _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) +# _rest_server.start() +# time.sleep(1) # bring time for the server to start +# yield _rest_server +# _rest_server.shutdown() +# _rest_server.join() + @pytest.fixture(scope='session') def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name _client = ContextClient() yield _client _client.close() -""" -def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) - assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) - return reply.json() -""" -"""# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- -def test_grpc_context( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_s_mb : Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_s_mb[0] +#def do_rest_request(url : str): +# base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) +# request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) +# LOGGER.warning('Request: GET {:s}'.format(str(request_url))) +# reply = requests.get(request_url) +# LOGGER.warning('Reply: {:s}'.format(str(reply.text))) +# assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) +# return reply.json() - database = Database(Session) +# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- + +def test_grpc_context( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[sqlalchemy.engine.Engine, MessageBroker] # pylint: disable=redefined-outer-name +) -> None: + db_engine = context_db_mb[0] # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() + rebuild_database(db_engine, drop_if_exists=True) + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) + events_collector = EventsCollector( + context_client_grpc, log_events_received=True, + activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, + activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + activate_connection_collector = False) events_collector.start() # ----- Get when the object does not exist ------------------------------------------------------------------------- @@ -163,71 +166,95 @@ def test_grpc_context( assert len(response.contexts) == 0 # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 + #db_entries = database.dump_all() + #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + #for db_entry in db_entries: + # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + #LOGGER.info('-----------------------------------------------------------') + #assert len(db_entries) == 0 # ----- Create the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetContext(Context(**CONTEXT)) assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - wrong_uuid = 'c97c4185-e1d1-4ea7-b6b9-afbf76cb61f4' + wrong_context_uuid = str(uuid.uuid4()) + wrong_context_id = json_context_id(wrong_context_uuid) with pytest.raises(grpc.RpcError) as e: - WRONG_TOPOLOGY_ID = copy.deepcopy(TOPOLOGY_ID) - WRONG_TOPOLOGY_ID['context_id']['context_uuid']['uuid'] = wrong_uuid WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['topology_ids'].append(WRONG_TOPOLOGY_ID) + WRONG_CONTEXT['topology_ids'].append(json_topology_id(str(uuid.uuid4()), context_id=wrong_context_id)) context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) assert e.value.details() == msg with pytest.raises(grpc.RpcError) as e: - WRONG_SERVICE_ID = copy.deepcopy(SERVICE_R1_R2_ID) - WRONG_SERVICE_ID['context_id']['context_uuid']['uuid'] = wrong_uuid WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['service_ids'].append(WRONG_SERVICE_ID) + WRONG_CONTEXT['service_ids'].append(json_service_id(str(uuid.uuid4()), context_id=wrong_context_id)) context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + with pytest.raises(grpc.RpcError) as e: + WRONG_CONTEXT = copy.deepcopy(CONTEXT) + WRONG_CONTEXT['slice_ids'].append(json_slice_id(str(uuid.uuid4()), context_id=wrong_context_id)) + context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.slice_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) assert e.value.details() == msg # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) + event = events_collector.get_event(block=True, timeout=10.0) assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.name == '' + assert len(response.topology_ids) == 0 + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 1 + assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 1 + assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.contexts[0].name == '' + assert len(response.contexts[0].topology_ids) == 0 + assert len(response.contexts[0].service_ids) == 0 + assert len(response.contexts[0].slice_ids) == 0 + # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetContext(Context(**CONTEXT)) + new_context_name = 'new' + CONTEXT_WITH_NAME = copy.deepcopy(CONTEXT) + CONTEXT_WITH_NAME['name'] = new_context_name + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_NAME)) assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) + event = events_collector.get_event(block=True, timeout=10.0) assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 1 + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # ----- Get when the object exists --------------------------------------------------------------------------------- + # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.name == new_context_name assert len(response.topology_ids) == 0 assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - # ----- List when the object exists -------------------------------------------------------------------------------- + # ----- List when the object is modified --------------------------------------------------------------------------- response = context_client_grpc.ListContextIds(Empty()) assert len(response.context_ids) == 1 assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID @@ -235,35 +262,53 @@ def test_grpc_context( response = context_client_grpc.ListContexts(Empty()) assert len(response.contexts) == 1 assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.contexts[0].name == new_context_name assert len(response.contexts[0].topology_ids) == 0 assert len(response.contexts[0].service_ids) == 0 + assert len(response.contexts[0].slice_ids) == 0 + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + #db_entries = database.dump_all() + #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + #for db_entry in db_entries: + # LOGGER.info(db_entry) + #LOGGER.info('-----------------------------------------------------------') + #assert len(db_entries) == 1 # ----- Remove the object ------------------------------------------------------------------------------------------ context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, ContextEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + event = events_collector.get_event(block=True, timeout=10.0) + assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 0 + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 0 # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 + #db_entries = database.dump_all() + #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + #for db_entry in db_entries: + # LOGGER.info(db_entry) + #LOGGER.info('-----------------------------------------------------------') + #assert len(db_entries) == 0 + raise Exception() +""" def test_grpc_topology( context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_s_mb[0] + context_db_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_db_mb[0] database = Database(session) @@ -394,8 +439,8 @@ def test_grpc_topology( def test_grpc_device( context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_s_mb[0] + context_db_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_db_mb[0] database = Database(session) @@ -571,8 +616,8 @@ def test_grpc_device( def test_grpc_link( context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_s_mb[0] + context_db_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name + session = context_db_mb[0] database = Database(session) @@ -753,10 +798,11 @@ def test_grpc_link( assert len(db_entries) == 0 """ +""" def test_grpc_service( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_s_mb[0] + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_db_mb[0] # ----- Clean the database ----------------------------------------------------------------------------------------- database = Database(Session) database.clear() @@ -941,14 +987,13 @@ def test_grpc_service( LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover LOGGER.info('-----------------------------------------------------------') assert len(db_entries) == 0 - - """ +""" def test_grpc_connection( context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_s_mb[0] + Session = context_db_mb[0] database = Database(Session) -- GitLab From a8e2c9b3b1ada55a6aded67b951cc18e1c84578c Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 23 Dec 2022 12:07:49 +0000 Subject: [PATCH 015/158] Context component: - updated manifest - corrected README.md notes - corrected script run-tests-locally - partial code implementation --- manifests/cockroachdb/README.md | 18 ++--- manifests/contextservice.yaml | 2 +- scripts/run_tests_locally-context.sh | 4 +- .../service/ContextServiceServicerImpl.py | 71 ++++++++++++------- src/context/tests/test_unitary.py | 28 ++++---- 5 files changed, 69 insertions(+), 54 deletions(-) diff --git a/manifests/cockroachdb/README.md b/manifests/cockroachdb/README.md index 6807afbb0..ce99f5034 100644 --- a/manifests/cockroachdb/README.md +++ b/manifests/cockroachdb/README.md @@ -12,7 +12,7 @@ kubectl apply -f "${DEPLOY_PATH}/crds.yaml" # Deploy CockroachDB Operator curl -o "${DEPLOY_PATH}/operator.yaml" "${OPERATOR_BASE_URL}/install/operator.yaml" # edit "${DEPLOY_PATH}/operator.yaml" -# - add env var: WATCH_NAMESPACE='tfs-ccdb' +# - add env var: WATCH_NAMESPACE='tfs-crdb' kubectl apply -f "${DEPLOY_PATH}/operator.yaml" # Deploy CockroachDB @@ -20,21 +20,21 @@ curl -o "${DEPLOY_PATH}/cluster.yaml" "${OPERATOR_BASE_URL}/examples/example.yam # edit "${DEPLOY_PATH}/cluster.yaml" # - set version # - set number of replicas -kubectl create namespace tfs-ccdb -kubectl apply --namespace tfs-ccdb -f "${DEPLOY_PATH}/cluster.yaml" +kubectl create namespace tfs-crdb +kubectl apply --namespace tfs-crdb -f "${DEPLOY_PATH}/cluster.yaml" # Deploy CockroachDB Client curl -o "${DEPLOY_PATH}/client-secure-operator.yaml" "${OPERATOR_BASE_URL}/examples/client-secure-operator.yaml" -kubectl create --namespace tfs-ccdb -f "${DEPLOY_PATH}/client-secure-operator.yaml" +kubectl create --namespace tfs-crdb -f "${DEPLOY_PATH}/client-secure-operator.yaml" # Add tfs user with admin rights -$ kubectl exec -it ccdb-client-secure --namespace tfs-ccdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public +$ kubectl exec -it cockroachdb-client-secure --namespace tfs-crdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public -- CREATE USER tfs WITH PASSWORD 'tfs123'; -- GRANT admin TO tfs; # Expose CockroachDB SQL port (26257) -PORT=$(kubectl --namespace cockroachdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') -PATCH='{"data": {"'${PORT}'": "cockroachdb/cockroachdb-public:'${PORT}'"}}' +PORT=$(kubectl --namespace tfs-crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +PATCH='{"data": {"'${PORT}'": "tfs-crdb/cockroachdb-public:'${PORT}'"}}' kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}' @@ -43,8 +43,8 @@ PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" # Expose CockroachDB Console port (8080) -PORT=$(kubectl --namespace cockroachdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') -PATCH='{"data": {"'${PORT}'": "cockroachdb/cockroachdb-public:'${PORT}'"}}' +PORT=$(kubectl --namespace tfs-crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') +PATCH='{"data": {"'${PORT}'": "tfs-crdb/cockroachdb-public:'${PORT}'"}}' kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}' diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 8201aed3e..dc7e548ce 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -47,7 +47,7 @@ spec: - containerPort: 8080 env: - name: CCDB_URL - value: "cockroachdb://tfs:tfs123@cockroachdb-public.cockroachdb.svc.cluster.local:26257/tfs?sslmode=require" + value: "cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs?sslmode=require" - name: DB_BACKEND value: "redis" - name: MB_BACKEND diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index bf0cccd6b..ec12d8a80 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -36,13 +36,13 @@ cd $PROJECTDIR/src #export REDIS_SERVICE_HOST=$(kubectl get node $TFS_K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') #export REDIS_SERVICE_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}') -export CRDB_URI="cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs?sslmode=require" +export CRDB_URI="cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs?sslmode=require" +export PYTHONPATH=/home/tfs/tfs-ctrl/src # Run unitary tests and analyze coverage of code at same time #coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ # context/tests/test_unitary.py -source tfs_runtime_env_vars.sh pytest --log-level=INFO --verbose -o log_cli=true --maxfail=1 \ context/tests/test_unitary.py diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index fcb0024d2..f51e725cd 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -62,7 +62,7 @@ from context.service.database.ContextModel import ContextModel #from .Constants import ( # CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, # TOPIC_TOPOLOGY) -from .ChangeFeedClient import ChangeFeedClient +#from .ChangeFeedClient import ChangeFeedClient LOGGER = logging.getLogger(__name__) @@ -178,37 +178,54 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: + pass #for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): # yield ContextEvent(**json.loads(message.content)) - cf = ChangeFeedClient() - ready = cf.initialize() - if not ready: raise OperationFailedException('Initialize ChangeFeed') - for timestamp, _, primary_key, is_delete, after in cf.get_changes('context'): - if is_delete: - event_type = EventTypeEnum.EVENTTYPE_REMOVE - else: - is_create = (timestamp - after.get('created_at')) < 1.0 - event_type = EventTypeEnum.EVENTTYPE_CREATE if is_create else EventTypeEnum.EVENTTYPE_UPDATE - event = { - 'event': {'timestamp': {'timestamp': timestamp}, 'event_type': event_type}, - 'context_id': json_context_id(primary_key[0]), - } - yield ContextEvent(**event) + #cf = ChangeFeedClient() + #ready = cf.initialize() + #if not ready: raise OperationFailedException('Initialize ChangeFeed') + #for timestamp, _, primary_key, is_delete, after in cf.get_changes('context'): + # if is_delete: + # event_type = EventTypeEnum.EVENTTYPE_REMOVE + # else: + # is_create = (timestamp - after.get('created_at')) < 1.0 + # event_type = EventTypeEnum.EVENTTYPE_CREATE if is_create else EventTypeEnum.EVENTTYPE_UPDATE + # event = { + # 'event': {'timestamp': {'timestamp': timestamp}, 'event_type': event_type}, + # 'context_id': json_context_id(primary_key[0]), + # } + # yield ContextEvent(**event) # ----- Topology --------------------------------------------------------------------------------------------------- -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: -# context_uuid = request.context_uuid.uuid -# -# with self.session() as session: -# result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() -# if not result: -# raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) -# -# db_topologies = result.topology -# return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies]) -# + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: + context_uuid = request.context_uuid.uuid + + with self.session() as session: + result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + if not result: + raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + + db_topologies = result.topology + return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies]) + return ContextIdList(context_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) + + + def callback(session : Session) -> List[Dict]: + obj_list : List[ContextModel] = session.query(ContextModel).all() + return [obj.dump_id() for obj in obj_list] + + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: + def callback(session : Session) -> List[Dict]: + obj_list : List[ContextModel] = session.query(ContextModel).all() + return [obj.dump() for obj in obj_list] + return ContextList(contexts=run_transaction(sessionmaker(bind=self.db_engine), callback)) + + + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: # context_uuid = request.context_uuid.uuid diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index 8bf1b4ff1..32c571359 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -145,12 +145,12 @@ def test_grpc_context( rebuild_database(db_engine, drop_if_exists=True) # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client_grpc, log_events_received=True, - activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, - activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - activate_connection_collector = False) - events_collector.start() + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() # ----- Get when the object does not exist ------------------------------------------------------------------------- with pytest.raises(grpc.RpcError) as e: @@ -207,8 +207,8 @@ def test_grpc_context( assert e.value.details() == msg # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=10.0) - assert isinstance(event, ContextEvent) + #event = events_collector.get_event(block=True, timeout=10.0) + #assert isinstance(event, ContextEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID @@ -241,8 +241,8 @@ def test_grpc_context( assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=10.0) - assert isinstance(event, ContextEvent) + #event = events_collector.get_event(block=True, timeout=10.0) + #assert isinstance(event, ContextEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID @@ -279,8 +279,8 @@ def test_grpc_context( context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=10.0) - assert isinstance(event, ContextEvent) + #event = events_collector.get_event(block=True, timeout=10.0) + #assert isinstance(event, ContextEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID @@ -292,7 +292,7 @@ def test_grpc_context( assert len(response.contexts) == 0 # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() # ----- Dump state of database after remove the object ------------------------------------------------------------- #db_entries = database.dump_all() @@ -302,8 +302,6 @@ def test_grpc_context( #LOGGER.info('-----------------------------------------------------------') #assert len(db_entries) == 0 - raise Exception() - """ def test_grpc_topology( context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name -- GitLab From 77483ce3b5410bc02b2dbca883cf0bea31dabef1 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 23 Dec 2022 15:26:12 +0000 Subject: [PATCH 016/158] Context component: - partial code implementation --- .../service/ContextServiceServicerImpl.py | 93 ++++++++++--------- src/context/service/database/ContextModel.py | 11 ++- src/context/service/database/TopologyModel.py | 35 +++---- src/context/service/database/__init__.py | 1 + 4 files changed, 75 insertions(+), 65 deletions(-) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index f51e725cd..6db5b99e7 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -58,7 +58,7 @@ from context.service.database.ContextModel import ContextModel #from context.service.database.ServiceModel import ( # ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) #from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status -#from context.service.database.TopologyModel import TopologyModel +from context.service.database.TopologyModel import TopologyModel #from .Constants import ( # CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, # TOPIC_TOPOLOGY) @@ -111,8 +111,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: context_uuid = request.context_uuid.uuid def callback(session : Session) -> Optional[Dict]: - obj : Optional[ContextModel] = \ - session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() + obj : Optional[ContextModel] = session\ + .query(ContextModel)\ + .filter_by(context_uuid=context_uuid)\ + .one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=self.db_engine), callback) if obj is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) @@ -202,47 +204,50 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: context_uuid = request.context_uuid.uuid - with self.session() as session: - result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - if not result: - raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - - db_topologies = result.topology - return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies]) - return ContextIdList(context_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) - - def callback(session : Session) -> List[Dict]: - obj_list : List[ContextModel] = session.query(ContextModel).all() + obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - + + #with self.session() as session: + # result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + # if not result: + # raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + # db_topologies = result.topology + return TopologyIdList(topology_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: + def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: + context_uuid = request.context_uuid.uuid + def callback(session : Session) -> List[Dict]: - obj_list : List[ContextModel] = session.query(ContextModel).all() + obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return ContextList(contexts=run_transaction(sessionmaker(bind=self.db_engine), callback)) + #with self.session() as session: + # result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by( + # context_uuid=context_uuid).one_or_none() + # if not result: + # raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + # db_topologies = result.topology + return TopologyList(topologies=run_transaction(sessionmaker(bind=self.db_engine), callback)) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: + context_uuid = request.context_id.context_uuid.uuid + topology_uuid = request.topology_uuid.uuid + + def callback(session : Session) -> Optional[Dict]: + obj : Optional[TopologyModel] = session\ + .query(TopologyModel)\ + .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid)\ + .one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=self.db_engine), callback) + if obj is None: raise NotFoundException(TopologyModel.__name__.replace('Model', ''), context_uuid) + return Topology(**obj) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: -# context_uuid = request.context_uuid.uuid -# -# with self.session() as session: -# result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by( -# context_uuid=context_uuid).one_or_none() -# if not result: -# raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) -# -# db_topologies = result.topology -# return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies]) -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: -# topology_uuid = request.topology_uuid.uuid -# # result, dump = self.database.get_object(TopologyModel, topology_uuid, True) # with self.session() as session: # devs = None @@ -265,8 +270,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # links.append(session.query(LinkModel).filter_by(**filt).one()) # # return Topology(**result.dump(devs, links)) -# -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId: # context_uuid = request.topology_id.context_id.context_uuid.uuid @@ -300,7 +304,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # dict_topology_id = db_topology.dump_id() # notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) # return TopologyId(**dict_topology_id) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: # context_uuid = request.context_id.context_uuid.uuid @@ -317,13 +321,12 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # event_type = EventTypeEnum.EVENTTYPE_REMOVE # notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) # return Empty() -# -## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: -## for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): -## yield TopologyEvent(**json.loads(message.content)) -# -# + +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: +# for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): +# yield TopologyEvent(**json.loads(message.content)) + # # ----- Device ----------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index 9ad5e0bcb..241198d3f 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -16,8 +16,8 @@ import logging from typing import Dict from sqlalchemy import Column, Float, String from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship from ._Base import _Base -#from sqlalchemy.orm import relationship LOGGER = logging.getLogger(__name__) @@ -27,7 +27,7 @@ class ContextModel(_Base): context_name = Column(String(), nullable=False) created_at = Column(Float) - #topology = relationship('TopologyModel', back_populates='context') + topology = relationship('TopologyModel', back_populates='context') def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} @@ -48,8 +48,13 @@ class ContextModel(_Base): return [TopologyModel(self.database, pk).dump_id() for pk,_ in db_topology_pks] """ - def dump(self, include_services=True, include_topologies=True) -> Dict: # pylint: disable=arguments-differ + def dump(self, + include_services : bool = True, # pylint: disable=arguments-differ + include_slices : bool = True, # pylint: disable=arguments-differ + include_topologies : bool = True # pylint: disable=arguments-differ + ) -> Dict: result = {'context_id': self.dump_id(), 'name': self.context_name} # if include_services: result['service_ids'] = self.dump_service_ids() + # if include_slices: result['slice_ids'] = self.dump_slice_ids() # if include_topologies: result['topology_ids'] = self.dump_topology_ids() return result diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py index 0a5698163..102e3ae3f 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/TopologyModel.py @@ -12,21 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, operator -from typing import Dict, List -from sqlalchemy.orm import relationship +import logging #, operator +from typing import Dict #, List from sqlalchemy import Column, ForeignKey from sqlalchemy.dialects.postgresql import UUID -from context.service.database._Base import Base +from sqlalchemy.orm import relationship +from ._Base import _Base + LOGGER = logging.getLogger(__name__) -class TopologyModel(Base): +class TopologyModel(_Base): __tablename__ = 'Topology' - context_uuid = Column(UUID(as_uuid=False), ForeignKey("Context.context_uuid"), primary_key=True) + context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid'), primary_key=True) topology_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) # Relationships - context = relationship("ContextModel", back_populates="topology") + context = relationship('ContextModel', back_populates='topology') def dump_id(self) -> Dict: context_id = self.context.dump_id() @@ -35,16 +36,16 @@ class TopologyModel(Base): 'topology_uuid': {'uuid': self.topology_uuid}, } - @staticmethod - def main_pk_name() -> str: - return 'topology_uuid' + #@staticmethod + #def main_pk_name() -> str: + # return 'topology_uuid' - def dump( # pylint: disable=arguments-differ - self, devices=None, links=None - ) -> Dict: + def dump(self) -> Dict: + # pylint: disable=arguments-differ result = {'topology_id': self.dump_id()} - if devices: - result['device_ids'] = [device.dump_id() for device in devices] - if links: - result['link_ids'] = [link.dump_id() for link in links] + # params: , devices=None, links=None + #if devices: + # result['device_ids'] = [device.dump_id() for device in devices] + #if links: + # result['link_ids'] = [link.dump_id() for link in links] return result diff --git a/src/context/service/database/__init__.py b/src/context/service/database/__init__.py index 980265786..c4940470a 100644 --- a/src/context/service/database/__init__.py +++ b/src/context/service/database/__init__.py @@ -14,3 +14,4 @@ from ._Base import _Base, rebuild_database from .ContextModel import ContextModel +from .TopologyModel import TopologyModel -- GitLab From 177e96a812917f0d426f041c7beaf259d62d1158 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 2 Jan 2023 17:58:59 +0000 Subject: [PATCH 017/158] CockroachDB: - updated manifests to v22.2.0 - renamed namespace to crdb - added script to launch cockroachdb client --- manifests/cockroachdb/README.md | 22 +++++++++---------- .../cockroachdb/client-secure-operator.yaml | 2 +- manifests/cockroachdb/cluster.yaml | 4 ++-- manifests/cockroachdb/operator.yaml | 18 +++++++++++++-- scripts/cockroachdb_client.sh | 16 ++++++++++++++ 5 files changed, 46 insertions(+), 16 deletions(-) create mode 100755 scripts/cockroachdb_client.sh diff --git a/manifests/cockroachdb/README.md b/manifests/cockroachdb/README.md index ce99f5034..b61e05f82 100644 --- a/manifests/cockroachdb/README.md +++ b/manifests/cockroachdb/README.md @@ -12,7 +12,7 @@ kubectl apply -f "${DEPLOY_PATH}/crds.yaml" # Deploy CockroachDB Operator curl -o "${DEPLOY_PATH}/operator.yaml" "${OPERATOR_BASE_URL}/install/operator.yaml" # edit "${DEPLOY_PATH}/operator.yaml" -# - add env var: WATCH_NAMESPACE='tfs-crdb' +# - add env var: WATCH_NAMESPACE='crdb' kubectl apply -f "${DEPLOY_PATH}/operator.yaml" # Deploy CockroachDB @@ -20,21 +20,21 @@ curl -o "${DEPLOY_PATH}/cluster.yaml" "${OPERATOR_BASE_URL}/examples/example.yam # edit "${DEPLOY_PATH}/cluster.yaml" # - set version # - set number of replicas -kubectl create namespace tfs-crdb -kubectl apply --namespace tfs-crdb -f "${DEPLOY_PATH}/cluster.yaml" +kubectl create namespace crdb +kubectl apply --namespace crdb -f "${DEPLOY_PATH}/cluster.yaml" # Deploy CockroachDB Client curl -o "${DEPLOY_PATH}/client-secure-operator.yaml" "${OPERATOR_BASE_URL}/examples/client-secure-operator.yaml" -kubectl create --namespace tfs-crdb -f "${DEPLOY_PATH}/client-secure-operator.yaml" +kubectl create --namespace crdb -f "${DEPLOY_PATH}/client-secure-operator.yaml" # Add tfs user with admin rights -$ kubectl exec -it cockroachdb-client-secure --namespace tfs-crdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --- CREATE USER tfs WITH PASSWORD 'tfs123'; --- GRANT admin TO tfs; +kubectl exec -it cockroachdb-client-secure --namespace crdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public + CREATE USER tfs WITH PASSWORD 'tfs123'; + GRANT admin TO tfs; # Expose CockroachDB SQL port (26257) -PORT=$(kubectl --namespace tfs-crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') -PATCH='{"data": {"'${PORT}'": "tfs-crdb/cockroachdb-public:'${PORT}'"}}' +PORT=$(kubectl --namespace crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +PATCH='{"data": {"'${PORT}'": "crdb/cockroachdb-public:'${PORT}'"}}' kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}' @@ -43,8 +43,8 @@ PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" # Expose CockroachDB Console port (8080) -PORT=$(kubectl --namespace tfs-crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') -PATCH='{"data": {"'${PORT}'": "tfs-crdb/cockroachdb-public:'${PORT}'"}}' +PORT=$(kubectl --namespace crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') +PATCH='{"data": {"'${PORT}'": "crdb/cockroachdb-public:'${PORT}'"}}' kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}' diff --git a/manifests/cockroachdb/client-secure-operator.yaml b/manifests/cockroachdb/client-secure-operator.yaml index 618d30ce6..f7f81c833 100644 --- a/manifests/cockroachdb/client-secure-operator.yaml +++ b/manifests/cockroachdb/client-secure-operator.yaml @@ -23,7 +23,7 @@ spec: serviceAccountName: cockroachdb-sa containers: - name: cockroachdb-client-secure - image: cockroachdb/cockroach:v22.1.8 + image: cockroachdb/cockroach:v22.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml index d36685109..f7444c006 100644 --- a/manifests/cockroachdb/cluster.yaml +++ b/manifests/cockroachdb/cluster.yaml @@ -40,9 +40,9 @@ spec: memory: 4Gi tlsEnabled: true # You can set either a version of the db or a specific image name -# cockroachDBVersion: v22.1.12 +# cockroachDBVersion: v22.2.0 image: - name: cockroachdb/cockroach:v22.1.12 + name: cockroachdb/cockroach:v22.2.0 # nodes refers to the number of crdb pods that are created # via the statefulset nodes: 3 diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml index 2db3c37f8..74734c7e9 100644 --- a/manifests/cockroachdb/operator.yaml +++ b/manifests/cockroachdb/operator.yaml @@ -478,6 +478,10 @@ spec: value: cockroachdb/cockroach:v21.1.18 - name: RELATED_IMAGE_COCKROACH_v21_1_19 value: cockroachdb/cockroach:v21.1.19 + - name: RELATED_IMAGE_COCKROACH_v21_1_20 + value: cockroachdb/cockroach:v21.1.20 + - name: RELATED_IMAGE_COCKROACH_v21_1_21 + value: cockroachdb/cockroach:v21.1.21 - name: RELATED_IMAGE_COCKROACH_v21_2_0 value: cockroachdb/cockroach:v21.2.0 - name: RELATED_IMAGE_COCKROACH_v21_2_1 @@ -510,6 +514,8 @@ spec: value: cockroachdb/cockroach:v21.2.15 - name: RELATED_IMAGE_COCKROACH_v21_2_16 value: cockroachdb/cockroach:v21.2.16 + - name: RELATED_IMAGE_COCKROACH_v21_2_17 + value: cockroachdb/cockroach:v21.2.17 - name: RELATED_IMAGE_COCKROACH_v22_1_0 value: cockroachdb/cockroach:v22.1.0 - name: RELATED_IMAGE_COCKROACH_v22_1_1 @@ -526,10 +532,18 @@ spec: value: cockroachdb/cockroach:v22.1.7 - name: RELATED_IMAGE_COCKROACH_v22_1_8 value: cockroachdb/cockroach:v22.1.8 + - name: RELATED_IMAGE_COCKROACH_v22_1_10 + value: cockroachdb/cockroach:v22.1.10 + - name: RELATED_IMAGE_COCKROACH_v22_1_11 + value: cockroachdb/cockroach:v22.1.11 + - name: RELATED_IMAGE_COCKROACH_v22_1_12 + value: cockroachdb/cockroach:v22.1.12 + - name: RELATED_IMAGE_COCKROACH_v22_2_0 + value: cockroachdb/cockroach:v22.2.0 - name: OPERATOR_NAME value: cockroachdb - name: WATCH_NAMESPACE - value: tfs-ccdb + value: crdb - name: POD_NAME valueFrom: fieldRef: @@ -538,7 +552,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: cockroachdb/cockroach-operator:v2.8.0 + image: cockroachdb/cockroach-operator:v2.9.0 imagePullPolicy: IfNotPresent name: cockroach-operator resources: diff --git a/scripts/cockroachdb_client.sh b/scripts/cockroachdb_client.sh new file mode 100755 index 000000000..6ac9eea6e --- /dev/null +++ b/scripts/cockroachdb_client.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kubectl exec -it cockroachdb-client-secure --namespace crdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public -- GitLab From b6b9d698529d0f886eb8cbc611f9d95fca11d6de Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 2 Jan 2023 18:00:00 +0000 Subject: [PATCH 018/158] Context compoent: - progress on migration to CockroachDB (partial) --- scripts/run_tests_locally-context.sh | 6 +- .../service/ContextServiceServicerImpl.py | 641 ++++++++---------- src/context/service/Engine.py | 20 +- src/context/service/__main__.py | 2 +- src/context/service/database/ContextModel.py | 42 +- src/context/service/database/DeviceModel.py | 217 ++++-- src/context/service/database/EndPointModel.py | 114 ++-- src/context/service/database/KpiSampleType.py | 28 - .../service/database/RelationModels.py | 74 +- src/context/service/database/TopologyModel.py | 37 +- src/context/service/database/__init__.py | 4 - src/context/tests/test_unitary.py | 328 +++++---- 12 files changed, 709 insertions(+), 804 deletions(-) delete mode 100644 src/context/service/database/KpiSampleType.py diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index ec12d8a80..61f8cee91 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -36,14 +36,16 @@ cd $PROJECTDIR/src #export REDIS_SERVICE_HOST=$(kubectl get node $TFS_K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') #export REDIS_SERVICE_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}') -export CRDB_URI="cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs?sslmode=require" +#export CRDB_URI="cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_test?sslmode=require" +export CRDB_URI="cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs_test?sslmode=require" export PYTHONPATH=/home/tfs/tfs-ctrl/src # Run unitary tests and analyze coverage of code at same time #coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ # context/tests/test_unitary.py -pytest --log-level=INFO --verbose -o log_cli=true --maxfail=1 \ +# --log-level=INFO -o log_cli=true +pytest --verbose --maxfail=1 --durations=0 \ context/tests/test_unitary.py #kubectl --namespace $TFS_K8S_NAMESPACE delete service redis-tests diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 6db5b99e7..2661f25c1 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -15,7 +15,7 @@ import grpc, json, logging, operator, sqlalchemy, threading, time, uuid from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker -#from sqlalchemy.dialects.postgresql import UUID, insert +from sqlalchemy.dialects.postgresql import UUID, insert from sqlalchemy_cockroachdb import run_transaction from typing import Dict, Iterator, List, Optional, Set, Tuple, Union from common.message_broker.MessageBroker import MessageBroker @@ -45,16 +45,16 @@ from common.rpc_method_wrapper.ServiceExceptions import ( #from context.service.database.ConstraintModel import ( # ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS, set_constraints) from context.service.database.ContextModel import ContextModel -#from context.service.database.DeviceModel import ( -# DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel) -#from context.service.database.EndPointModel import EndPointModel, KpiSampleTypeModel, set_kpi_sample_types +from context.service.database.DeviceModel import ( + DeviceModel, grpc_to_enum__device_operational_status, grpc_to_enum__device_driver) +from context.service.database.EndPointModel import EndPointModel, grpc_to_enum__kpi_sample_type +#from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types #from context.service.database.Events import notify_event -#from context.service.database.KpiSampleType import grpc_to_enum__kpi_sample_type #from context.service.database.LinkModel import LinkModel #from context.service.database.PolicyRuleModel import PolicyRuleModel -#from context.service.database.RelationModels import ( +from context.service.database.RelationModels import TopologyDeviceModel # ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, -# SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel) +# SliceSubSliceModel, TopologyLinkModel) #from context.service.database.ServiceModel import ( # ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) #from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status @@ -94,34 +94,34 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Context ---------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList: + def ListContextIds(self, request : Empty, context : grpc.ServicerContext) -> ContextIdList: def callback(session : Session) -> List[Dict]: obj_list : List[ContextModel] = session.query(ContextModel).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] return ContextIdList(context_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: + def ListContexts(self, request : Empty, context : grpc.ServicerContext) -> ContextList: def callback(session : Session) -> List[Dict]: obj_list : List[ContextModel] = session.query(ContextModel).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] return ContextList(contexts=run_transaction(sessionmaker(bind=self.db_engine), callback)) @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: + def GetContext(self, request : ContextId, context : grpc.ServicerContext) -> Context: context_uuid = request.context_uuid.uuid def callback(session : Session) -> Optional[Dict]: - obj : Optional[ContextModel] = session\ - .query(ContextModel)\ - .filter_by(context_uuid=context_uuid)\ - .one_or_none() + obj : Optional[ContextModel] = session.query(ContextModel)\ + .filter_by(context_uuid=context_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=self.db_engine), callback) if obj is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) return Context(**obj) @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: + def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId: context_uuid = request.context_id.context_uuid.uuid context_name = request.name @@ -147,15 +147,16 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) def callback(session : Session) -> Tuple[Optional[Dict], bool]: - obj : Optional[ContextModel] = \ - session.query(ContextModel).with_for_update().filter_by(context_uuid=context_uuid).one_or_none() + obj : Optional[ContextModel] = session.query(ContextModel).with_for_update()\ + .filter_by(context_uuid=context_uuid).one_or_none() is_update = obj is not None if is_update: obj.context_name = context_name session.merge(obj) else: session.add(ContextModel(context_uuid=context_uuid, context_name=context_name, created_at=time.time())) - obj = session.get(ContextModel, {'context_uuid': context_uuid}) + obj : Optional[ContextModel] = session.query(ContextModel)\ + .filter_by(context_uuid=context_uuid).one_or_none() return (None if obj is None else obj.dump_id()), is_update obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) @@ -166,7 +167,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer return ContextId(**obj_id) @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: + def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty: context_uuid = request.context_uuid.uuid def callback(session : Session) -> bool: @@ -179,7 +180,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: + def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: pass #for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): # yield ContextEvent(**json.loads(message.content)) @@ -201,174 +202,232 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Topology --------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: + def ListTopologyIds(self, request : ContextId, context : grpc.ServicerContext) -> TopologyIdList: context_uuid = request.context_uuid.uuid - def callback(session : Session) -> List[Dict]: obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - - #with self.session() as session: - # result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - # if not result: - # raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - # db_topologies = result.topology return TopologyIdList(topology_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: + def ListTopologies(self, request : ContextId, context : grpc.ServicerContext) -> TopologyList: context_uuid = request.context_uuid.uuid - def callback(session : Session) -> List[Dict]: obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - - #with self.session() as session: - # result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by( - # context_uuid=context_uuid).one_or_none() - # if not result: - # raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - # db_topologies = result.topology return TopologyList(topologies=run_transaction(sessionmaker(bind=self.db_engine), callback)) @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: + def GetTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Topology: context_uuid = request.context_id.context_uuid.uuid topology_uuid = request.topology_uuid.uuid def callback(session : Session) -> Optional[Dict]: - obj : Optional[TopologyModel] = session\ - .query(TopologyModel)\ - .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid)\ - .one_or_none() + obj : Optional[TopologyModel] = session.query(TopologyModel)\ + .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=self.db_engine), callback) - if obj is None: raise NotFoundException(TopologyModel.__name__.replace('Model', ''), context_uuid) + if obj is None: + obj_uuid = '{:s}/{:s}'.format(context_uuid, topology_uuid) + raise NotFoundException(TopologyModel.__name__.replace('Model', ''), obj_uuid) return Topology(**obj) -# result, dump = self.database.get_object(TopologyModel, topology_uuid, True) -# with self.session() as session: -# devs = None -# links = None -# -# filt = {'topology_uuid': topology_uuid} -# topology_devices = session.query(TopologyDeviceModel).filter_by(**filt).all() -# if topology_devices: -# devs = [] -# for td in topology_devices: -# filt = {'device_uuid': td.device_uuid} -# devs.append(session.query(DeviceModel).filter_by(**filt).one()) -# -# filt = {'topology_uuid': topology_uuid} -# topology_links = session.query(TopologyLinkModel).filter_by(**filt).all() -# if topology_links: -# links = [] -# for tl in topology_links: -# filt = {'link_uuid': tl.link_uuid} -# links.append(session.query(LinkModel).filter_by(**filt).one()) -# -# return Topology(**result.dump(devs, links)) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId: + context_uuid = request.topology_id.context_id.context_uuid.uuid + topology_uuid = request.topology_id.topology_uuid.uuid + topology_name = request.name -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId: -# context_uuid = request.topology_id.context_id.context_uuid.uuid -# topology_uuid = request.topology_id.topology_uuid.uuid -# with self.session() as session: -# topology_add = TopologyModel(topology_uuid=topology_uuid, context_uuid=context_uuid) -# updated = True -# db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() -# if not db_topology: -# updated = False -# session.merge(topology_add) -# session.commit() -# db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none() -# -# for device_id in request.device_ids: -# device_uuid = device_id.device_uuid.uuid -# td = TopologyDeviceModel(topology_uuid=topology_uuid, device_uuid=device_uuid) -# result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(td) -# -# -# for link_id in request.link_ids: -# link_uuid = link_id.link_uuid.uuid -# db_link = session.query(LinkModel).filter( -# LinkModel.link_uuid == link_uuid).one_or_none() -# tl = TopologyLinkModel(topology_uuid=topology_uuid, link_uuid=link_uuid) -# result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(tl) -# -# -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_topology_id = db_topology.dump_id() -# notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) -# return TopologyId(**dict_topology_id) + devices_to_add : List[str] = [ + {'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'device_uuid': device_id.device_uuid.uuid} + for device_id in request.device_ids + ] + links_to_add : List[str] = [ + {'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'link_uuid': link_id.link_uuid.uuid} + for link_id in request.link_ids + ] + print('devices_to_add', devices_to_add) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: -# context_uuid = request.context_id.context_uuid.uuid -# topology_uuid = request.topology_uuid.uuid -# -# with self.session() as session: -# result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).one_or_none() -# if not result: -# return Empty() -# dict_topology_id = result.dump_id() -# -# session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).delete() -# session.commit() -# event_type = EventTypeEnum.EVENTTYPE_REMOVE -# notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id}) -# return Empty() + def callback(session : Session) -> Tuple[Optional[Dict], bool]: + topology_data = [{ + 'context_uuid' : context_uuid, + 'topology_uuid': topology_uuid, + 'topology_name': topology_name, + 'created_at' : time.time(), + }] + stmt = insert(TopologyModel).values(topology_data) + stmt = stmt.on_conflict_do_update( + index_elements=[TopologyModel.context_uuid, TopologyModel.topology_uuid], + set_=dict(topology_name = stmt.excluded.topology_name) + ) + session.execute(stmt) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: -# for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): -# yield TopologyEvent(**json.loads(message.content)) + if len(devices_to_add) > 0: + session.execute(insert(TopologyDeviceModel).values(devices_to_add).on_conflict_do_nothing( + index_elements=[ + TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, + TopologyDeviceModel.device_uuid + ] + )) + + #if len(link_to_add) > 0: + # session.execute(insert(TopologyLinkModel).values(link_to_add).on_conflict_do_nothing( + # index_elements=[ + # TopologyLinkModel.context_uuid, TopologyLinkModel.topology_uuid, + # TopologyLinkModel.link_uuid + # ] + # )) + + is_update = True + obj : Optional[TopologyModel] = session.query(TopologyModel)\ + .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).one_or_none() + return (None if obj is None else obj.dump_id()), is_update + + obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) + if obj_id is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) + + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': obj_id}) + return TopologyId(**obj_id) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + context_uuid = request.context_id.context_uuid.uuid + topology_uuid = request.topology_uuid.uuid + + def callback(session : Session) -> bool: + num_deleted = session.query(TopologyModel)\ + .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).delete() + return num_deleted > 0 + + deleted = run_transaction(sessionmaker(bind=self.db_engine), callback) + #if deleted: + # notify_event(self.messagebroker, TOPIC_TOPOLOGY, EventTypeEnum.EVENTTYPE_REMOVE, {'topology_id': request}) + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetTopologyEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: + pass + #for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): + # yield TopologyEvent(**json.loads(message.content)) + + # ----- Device ----------------------------------------------------------------------------------------------------- + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListDeviceIds(self, request : Empty, context : grpc.ServicerContext) -> DeviceIdList: + def callback(session : Session) -> List[Dict]: + obj_list : List[DeviceModel] = session.query(DeviceModel).all() + #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return DeviceIdList(device_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListDevices(self, request : Empty, context : grpc.ServicerContext) -> DeviceList: + def callback(session : Session) -> List[Dict]: + obj_list : List[DeviceModel] = session.query(DeviceModel).all() + #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return DeviceList(devices=run_transaction(sessionmaker(bind=self.db_engine), callback)) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetDevice(self, request : ContextId, context : grpc.ServicerContext) -> Device: + device_uuid = request.device_uuid.uuid + def callback(session : Session) -> Optional[Dict]: + obj : Optional[DeviceModel] = session.query(DeviceModel)\ + .filter_by(device_uuid=device_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=self.db_engine), callback) + if obj is None: raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) + return Device(**obj) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: + device_uuid = request.device_id.device_uuid.uuid + device_name = request.name + device_type = request.device_type + oper_status = grpc_to_enum__device_operational_status(request.device_operational_status) + device_drivers = [grpc_to_enum__device_driver(d) for d in request.device_drivers] + + related_topology_uuids : Set[Tuple[str, str]] = set() + endpoints_data : List[Dict] = list() + for i, endpoint in enumerate(request.device_endpoints): + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + if device_uuid != endpoint_device_uuid: + raise InvalidArgumentException( + 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, + ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) + + endpoint_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid + endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid + + kpi_sample_types = [grpc_to_enum__kpi_sample_type(kst) for kst in endpoint.kpi_sample_types] + + endpoints_data.append({ + 'context_uuid' : endpoint_context_uuid, + 'topology_uuid' : endpoint_topology_uuid, + 'device_uuid' : endpoint_device_uuid, + 'endpoint_uuid' : endpoint.endpoint_id.endpoint_uuid.uuid, + 'endpoint_type' : endpoint.endpoint_type, + 'kpi_sample_types': kpi_sample_types, + }) + + if len(endpoint_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: + related_topology_uuids.add({ + 'context_uuid': endpoint_context_uuid, + 'topology_uuid': endpoint_topology_uuid, + 'device_uuid': endpoint_device_uuid, + }) + + def callback(session : Session) -> Tuple[Optional[Dict], bool]: + obj : Optional[DeviceModel] = session.query(DeviceModel).with_for_update()\ + .filter_by(device_uuid=device_uuid).one_or_none() + is_update = obj is not None + if is_update: + obj.device_name = device_name + obj.device_type = device_type + obj.device_operational_status = oper_status + obj.device_drivers = device_drivers + session.merge(obj) + else: + session.add(DeviceModel( + device_uuid=device_uuid, device_name=device_name, device_type=device_type, + device_operational_status=oper_status, device_drivers=device_drivers, created_at=time.time())) + obj : Optional[DeviceModel] = session.query(DeviceModel)\ + .filter_by(device_uuid=device_uuid).one_or_none() + + stmt = insert(EndPointModel).values(endpoints_data) + stmt = stmt.on_conflict_do_update( + index_elements=[ + EndPointModel.context_uuid, EndPointModel.topology_uuid, EndPointModel.device_uuid, + EndPointModel.endpoint_uuid + ], + set_=dict( + endpoint_type = stmt.excluded.endpoint_type, + kpi_sample_types = stmt.excluded.kpi_sample_types, + ) + ) + session.execute(stmt) + + session.execute(insert(TopologyDeviceModel).values(list(related_topology_uuids)).on_conflict_do_nothing( + index_elements=[ + TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, + TopologyDeviceModel.device_uuid + ] + )) + + return (None if obj is None else obj.dump_id()), is_update + + obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) + if obj_id is None: raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) + + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': obj_id}) + return DeviceId(**obj_id) -# # ----- Device ----------------------------------------------------------------------------------------------------- -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList: -# with self.session() as session: -# result = session.query(DeviceModel).all() -# return DeviceIdList(device_ids=[device.dump_id() for device in result]) -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList: -# with self.session() as session: -# result = session.query(DeviceModel).all() -# return DeviceList(devices=[device.dump() for device in result]) -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device: -# device_uuid = request.device_uuid.uuid -# with self.session() as session: -# result = session.query(DeviceModel).filter(DeviceModel.device_uuid == device_uuid).one_or_none() -# if not result: -# raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) -# -# rd = result.dump(include_config_rules=True, include_drivers=True, include_endpoints=True) -# -# rt = Device(**rd) -# -# return rt -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId: # with self.session() as session: -# device_uuid = request.device_id.device_uuid.uuid -# -# for i, endpoint in enumerate(request.device_endpoints): -# endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid -# if len(endpoint_device_uuid) == 0: -# endpoint_device_uuid = device_uuid -# if device_uuid != endpoint_device_uuid: -# raise InvalidArgumentException( -# 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, -# ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) -# # config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) # running_config_result = self.update_config(session, device_uuid, 'device', config_rules) # db_running_config = running_config_result[0][0] @@ -388,198 +447,42 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # # self.set_drivers(db_device, request.device_drivers) # -# for i, endpoint in enumerate(request.device_endpoints): -# endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid -# # endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid -# # if len(endpoint_device_uuid) == 0: -# # endpoint_device_uuid = device_uuid -# -# endpoint_attributes = { -# 'device_uuid' : db_device.device_uuid, -# 'endpoint_uuid': endpoint_uuid, -# 'endpoint_type': endpoint.endpoint_type, -# } -# -# endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid -# endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# # str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) -# -# db_topology, topo_dump = self.database.get_object(TopologyModel, endpoint_topology_uuid) -# -# topology_device = TopologyDeviceModel( -# topology_uuid=endpoint_topology_uuid, -# device_uuid=db_device.device_uuid) -# self.database.create_or_update(topology_device) -# -# endpoint_attributes['topology_uuid'] = db_topology.topology_uuid -# result : Tuple[EndPointModel, bool] = update_or_create_object( -# self.database, EndPointModel, str_endpoint_key, endpoint_attributes) -# db_endpoint, endpoint_updated = result # pylint: disable=unused-variable -# -# new_endpoint = EndPointModel(**endpoint_attributes) -# result: Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint) -# db_endpoint, updated = result -# -# self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types) -# -# # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_device_id = db_device.dump_id() -# # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) -# -# return DeviceId(**dict_device_id) -# -# def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types): -# db_endpoint_pk = db_endpoint.endpoint_uuid -# for kpi_sample_type in grpc_endpoint_kpi_sample_types: -# orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) -# # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name]) -# data = {'endpoint_uuid': db_endpoint_pk, -# 'kpi_sample_type': orm_kpi_sample_type.name, -# 'kpi_uuid': str(uuid.uuid4())} -# db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data) -# self.database.create(db_endpoint_kpi_sample_type) -# -# def set_drivers(self, db_device: DeviceModel, grpc_device_drivers): -# db_device_pk = db_device.device_uuid -# for driver in grpc_device_drivers: -# orm_driver = grpc_to_enum__device_driver(driver) -# str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) -# driver_config = { -# # "driver_uuid": str(uuid.uuid4()), -# "device_uuid": db_device_pk, -# "driver": orm_driver.name -# } -# db_device_driver = DriverModel(**driver_config) -# db_device_driver.device_fk = db_device -# db_device_driver.driver = orm_driver -# -# self.database.create_or_update(db_device_driver) -# -# def update_config( -# self, session, db_parent_pk: str, config_name: str, -# raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]] -# ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: -# -# created = False -# -# db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none() -# if not db_config: -# db_config = ConfigModel() -# setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk) -# session.add(db_config) -# session.commit() -# created = True -# -# LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump())) -# -# db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)] -# -# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): -# if action == ORM_ConfigActionEnum.SET: -# result : Tuple[ConfigRuleModel, bool] = self.set_config_rule( -# db_config, position, resource_key, resource_value) -# db_config_rule, updated = result -# db_objects.append((db_config_rule, updated)) -# elif action == ORM_ConfigActionEnum.DELETE: -# self.delete_config_rule(db_config, resource_key) -# else: -# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' -# raise AttributeError( -# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) -# -# return db_objects -# -# def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str, -# ): # -> Tuple[ConfigRuleModel, bool]: -# -# from src.context.service.database.Tools import fast_hasher -# str_rule_key_hash = fast_hasher(resource_key) -# str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') -# pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key)) -# data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position, -# 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value} -# to_add = ConfigRuleModel(**data) -# -# result, updated = self.database.create_or_update(to_add) -# return result, updated -# -# def delete_config_rule( -# self, db_config: ConfigModel, resource_key: str -# ) -> None: -# -# from src.context.service.database.Tools import fast_hasher -# str_rule_key_hash = fast_hasher(resource_key) -# str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') -# -# db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) -# -# if db_config_rule is None: -# return -# db_config_rule.delete() -# -# def delete_all_config_rules(self, db_config: ConfigModel) -> None: -# -# db_config_rule_pks = db_config.references(ConfigRuleModel) -# for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() -# -# """ -# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): -# if action == ORM_ConfigActionEnum.SET: -# result: Tuple[ConfigRuleModel, bool] = set_config_rule( -# database, db_config, position, resource_key, resource_value) -# db_config_rule, updated = result -# db_objects.append((db_config_rule, updated)) -# elif action == ORM_ConfigActionEnum.DELETE: -# delete_config_rule(database, db_config, resource_key) -# else: -# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' -# raise AttributeError( -# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) -# -# return db_objects -# """ -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty: -# device_uuid = request.device_uuid.uuid -# -# with self.session() as session: -# db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() -# -# session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() -# session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() -# session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() -# -# if not db_device: -# return Empty() -# dict_device_id = db_device.dump_id() -# -# session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() -# session.commit() -# event_type = EventTypeEnum.EVENTTYPE_REMOVE -# notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id}) -# return Empty() -# -## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: -## for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): -## yield DeviceEvent(**json.loads(message.content)) -# -# -# # + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: + device_uuid = request.device_uuid.uuid + def callback(session : Session) -> bool: + session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() + num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() + #db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() + #session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() + #session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() + #session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() + return num_deleted > 0 + deleted = run_transaction(sessionmaker(bind=self.db_engine), callback) + #if deleted: + # notify_event(self.messagebroker, TOPIC_DEVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'device_id': request}) + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: + pass + #for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): + # yield DeviceEvent(**json.loads(message.content)) + + # # ----- Link ------------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList: +# def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: # with self.session() as session: # result = session.query(LinkModel).all() # return LinkIdList(link_ids=[db_link.dump_id() for db_link in result]) # # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList: +# def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: # with self.session() as session: # link_list = LinkList() # @@ -599,7 +502,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return link_list # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link: +# def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: # link_uuid = request.link_uuid.uuid # with self.session() as session: # result = session.query(LinkModel).filter(LinkModel.link_uuid == link_uuid).one_or_none() @@ -623,7 +526,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId: +# def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: # link_uuid = request.link_id.link_uuid.uuid # # new_link = LinkModel(**{ @@ -659,7 +562,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return LinkId(**dict_link_id) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty: +# def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: # with self.session() as session: # link_uuid = request.link_uuid.uuid # @@ -678,7 +581,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return Empty() # ## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: +## def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: ## for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): ## yield LinkEvent(**json.loads(message.content)) # @@ -686,7 +589,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # # ----- Service ---------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList: +# def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList: # context_uuid = request.context_uuid.uuid # # with self.session() as session: @@ -694,7 +597,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services]) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: +# def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: # context_uuid = request.context_uuid.uuid # # with self.session() as session: @@ -704,7 +607,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service: +# def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service: # service_uuid = request.service_uuid.uuid # with self.session() as session: # result = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none() @@ -775,7 +678,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return db_objects # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId: +# def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: # with self.lock: # with self.session() as session: # @@ -893,7 +796,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return ServiceId(**dict_service_id) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty: +# def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: # with self.lock: # context_uuid = request.context_id.context_uuid.uuid # service_uuid = request.service_uuid.uuid @@ -909,7 +812,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return Empty() # ## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: +## def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: ## for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): ## yield ServiceEvent(**json.loads(message.content)) # @@ -917,7 +820,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # # ----- Slice ---------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList: +# def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList: # with self.lock: # db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) # db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) @@ -925,7 +828,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices]) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList: +# def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList: # with self.lock: # db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) # db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) @@ -933,7 +836,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return SliceList(slices=[db_slice.dump() for db_slice in db_slices]) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice: +# def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice: # with self.lock: # str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid]) # db_slice : SliceModel = get_object(self.database, SliceModel, str_key) @@ -942,7 +845,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # include_service_ids=True, include_subslice_ids=True)) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: +# def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: # with self.lock: # context_uuid = request.slice_id.context_id.context_uuid.uuid # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) @@ -1027,7 +930,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return SliceId(**dict_slice_id) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def UnsetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: +# def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: # with self.lock: # context_uuid = request.slice_id.context_id.context_uuid.uuid # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) @@ -1076,7 +979,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return SliceId(**dict_slice_id) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty: +# def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: # with self.lock: # context_uuid = request.context_id.context_uuid.uuid # slice_uuid = request.slice_uuid.uuid @@ -1092,7 +995,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return Empty() # ## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: +## def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: ## for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT): ## yield SliceEvent(**json.loads(message.content)) # @@ -1100,7 +1003,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # # ----- Connection ------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: +# def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: # with self.session() as session: # result = session.query(DeviceModel).all() # return DeviceIdList(device_ids=[device.dump_id() for device in result]) @@ -1113,7 +1016,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections]) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListConnections(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: +# def ListConnections(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: # with self.lock: # str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) # db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) @@ -1122,13 +1025,13 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections]) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection: +# def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection: # with self.lock: # db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid) # return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True)) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId: +# def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId: # with self.lock: # connection_uuid = request.connection_id.connection_uuid.uuid # @@ -1167,7 +1070,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return ConnectionId(**dict_connection_id) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty: +# def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty: # with self.lock: # db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False) # found = db_connection.load() @@ -1181,7 +1084,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return Empty() # ## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: +## def GetConnectionEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: ## for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): ## yield ConnectionEvent(**json.loads(message.content)) # @@ -1189,28 +1092,28 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # # ----- Policy ----------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: +# def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: # with self.lock: # db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) # db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) # return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules]) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList: +# def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList: # with self.lock: # db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) # db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) # return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules]) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: +# def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: # with self.lock: # policy_rule_uuid = request.uuid.uuid # db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid) # return PolicyRule(**db_policy_rule.dump()) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: +# def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: # with self.lock: # policy_rule_type = request.WhichOneof('policy_rule') # policy_rule_json = grpc_message_to_json(request) @@ -1225,7 +1128,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return PolicyRuleId(**dict_policy_id) # # @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty: +# def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty: # with self.lock: # policy_uuid = request.uuid.uuid # db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False) diff --git a/src/context/service/Engine.py b/src/context/service/Engine.py index 08e1e4f93..ec4702f27 100644 --- a/src/context/service/Engine.py +++ b/src/context/service/Engine.py @@ -20,21 +20,31 @@ LOGGER = logging.getLogger(__name__) APP_NAME = 'tfs' class Engine: - def get_engine(self) -> sqlalchemy.engine.Engine: + @staticmethod + def get_engine() -> sqlalchemy.engine.Engine: crdb_uri = get_setting('CRDB_URI') try: engine = sqlalchemy.create_engine( - crdb_uri, connect_args={'application_name': APP_NAME}, echo=False, future=True) + crdb_uri, connect_args={'application_name': APP_NAME}, echo=True, future=True) except: # pylint: disable=bare-except LOGGER.exception('Failed to connect to database: {:s}'.format(crdb_uri)) return None try: - if not sqlalchemy_utils.database_exists(engine.url): - sqlalchemy_utils.create_database(engine.url) + Engine.create_database(engine) except: # pylint: disable=bare-except - LOGGER.exception('Failed to check/create to database: {:s}'.format(crdb_uri)) + LOGGER.exception('Failed to check/create to database: {:s}'.format(engine.url)) return None return engine + + @staticmethod + def create_database(engine : sqlalchemy.engine.Engine) -> None: + if not sqlalchemy_utils.database_exists(engine.url): + sqlalchemy_utils.create_database(engine.url) + + @staticmethod + def drop_database(engine : sqlalchemy.engine.Engine) -> None: + if sqlalchemy_utils.database_exists(engine.url): + sqlalchemy_utils.drop_database(engine.url) diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index c5bbcc3f2..fbdabb2d7 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -45,7 +45,7 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) - db_engine = Engine().get_engine() + db_engine = Engine.get_engine() rebuild_database(db_engine, drop_if_exists=False) # Get message broker instance diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py index 241198d3f..ae8cf995f 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/ContextModel.py @@ -12,15 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging -from typing import Dict +from typing import Dict, List from sqlalchemy import Column, Float, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from ._Base import _Base -LOGGER = logging.getLogger(__name__) - class ContextModel(_Base): __tablename__ = 'context' context_uuid = Column(UUID(as_uuid=False), primary_key=True) @@ -28,33 +25,20 @@ class ContextModel(_Base): created_at = Column(Float) topology = relationship('TopologyModel', back_populates='context') + #service = relationship('ServiceModel', back_populates='context') + #slice = relationship('SliceModel', back_populates='context') def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} - #@staticmethod - #def main_pk_name(): - # return 'context_uuid' - - """ - def dump_service_ids(self) -> List[Dict]: - from .ServiceModel import ServiceModel # pylint: disable=import-outside-toplevel - db_service_pks = self.references(ServiceModel) - return [ServiceModel(self.database, pk).dump_id() for pk,_ in db_service_pks] - def dump_topology_ids(self) -> List[Dict]: - from .TopologyModel import TopologyModel # pylint: disable=import-outside-toplevel - db_topology_pks = self.references(TopologyModel) - return [TopologyModel(self.database, pk).dump_id() for pk,_ in db_topology_pks] - """ - - def dump(self, - include_services : bool = True, # pylint: disable=arguments-differ - include_slices : bool = True, # pylint: disable=arguments-differ - include_topologies : bool = True # pylint: disable=arguments-differ - ) -> Dict: - result = {'context_id': self.dump_id(), 'name': self.context_name} - # if include_services: result['service_ids'] = self.dump_service_ids() - # if include_slices: result['slice_ids'] = self.dump_slice_ids() - # if include_topologies: result['topology_ids'] = self.dump_topology_ids() - return result + return + + def dump(self) -> Dict: + return { + 'context_id' : self.dump_id(), + 'name' : self.context_name, + 'topology_ids': [obj.dump_id() for obj in self.topology], + #'service_ids' : [obj.dump_id() for obj in self.service ], + #'slice_ids' : [obj.dump_id() for obj in self.slice ], + } diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py index cb568e123..5c9e27e06 100644 --- a/src/context/service/database/DeviceModel.py +++ b/src/context/service/database/DeviceModel.py @@ -11,17 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import enum import functools, logging -import uuid -from typing import Dict, List -from common.orm.Database import Database -from common.orm.backend.Tools import key_to_str +#import uuid +from typing import Dict #, List +#from common.orm.Database import Database +#from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum -from sqlalchemy import Column, ForeignKey, String, Enum +from sqlalchemy import Column, Float, ForeignKey, String, Enum from sqlalchemy.dialects.postgresql import UUID, ARRAY -from context.service.database._Base import Base from sqlalchemy.orm import relationship +from context.service.database._Base import _Base from .Tools import grpc_to_enum LOGGER = logging.getLogger(__name__) @@ -46,80 +47,152 @@ class ORM_DeviceOperationalStatusEnum(enum.Enum): grpc_to_enum__device_operational_status = functools.partial( grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum) -class DeviceModel(Base): - __tablename__ = 'Device' +class DeviceModel(_Base): + __tablename__ = 'device' device_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_type = Column(String) - device_config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid", ondelete='CASCADE')) - device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum, create_constraint=False, - native_enum=False)) + device_name = Column(String(), nullable=False) + device_type = Column(String(), nullable=False) + #device_config_uuid = Column(UUID(as_uuid=False), ForeignKey('config.config_uuid', ondelete='CASCADE')) + device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum)) + device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) + created_at = Column(Float) # Relationships - device_config = relationship("ConfigModel", passive_deletes=True, lazy="joined") - driver = relationship("DriverModel", passive_deletes=True, back_populates="device") - endpoints = relationship("EndPointModel", passive_deletes=True, back_populates="device") + topology_device = relationship('TopologyDeviceModel', back_populates='devices') + #device_config = relationship("ConfigModel", passive_deletes=True, lazy="joined") + endpoints = relationship('EndPointModel', passive_deletes=True, back_populates='device') def dump_id(self) -> Dict: return {'device_uuid': {'uuid': self.device_uuid}} - def dump_config(self) -> Dict: - return self.device_config.dump() - - def dump_drivers(self) -> List[int]: - response = [] - for a in self.driver: - response.append(a.dump()) - - return response - - def dump_endpoints(self) -> List[Dict]: - response = [] - - for a in self.endpoints: - response.append(a.dump()) - - return response - - def dump( # pylint: disable=arguments-differ - self, include_config_rules=True, include_drivers=True, include_endpoints=True - ) -> Dict: - result = { - 'device_id': self.dump_id(), - 'device_type': self.device_type, + def dump(self) -> Dict: + return { + 'device_id' : self.dump_id(), + 'name' : self.device_name, + 'device_type' : self.device_type, 'device_operational_status': self.device_operational_status.value, + 'device_drivers' : [d.value for d in self.device_drivers], + #'device_config' : {'config_rules': self.device_config.dump()}, + #'device_endpoints' : [ep.dump() for ep in self.endpoints], } - if include_config_rules: result.setdefault('device_config', {})['config_rules'] = self.dump_config() - if include_drivers: result['device_drivers'] = self.dump_drivers() - if include_endpoints: result['device_endpoints'] = self.dump_endpoints() - return result - - @staticmethod - def main_pk_name(): - return 'device_uuid' -class DriverModel(Base): # pylint: disable=abstract-method - __tablename__ = 'Driver' - # driver_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid", ondelete='CASCADE'), primary_key=True) - driver = Column(Enum(ORM_DeviceDriverEnum, create_constraint=False, native_enum=False)) - - # Relationships - device = relationship("DeviceModel", back_populates="driver") - - - def dump(self) -> Dict: - return self.driver.value - - @staticmethod - def main_pk_name(): - return 'device_uuid' +#def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers): +# db_device_pk = db_device.device_uuid +# for driver in grpc_device_drivers: +# orm_driver = grpc_to_enum__device_driver(driver) +# str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) +# db_device_driver = DriverModel(database, str_device_driver_key) +# db_device_driver.device_fk = db_device +# db_device_driver.driver = orm_driver +# db_device_driver.save() + +# def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types): +# db_endpoint_pk = db_endpoint.endpoint_uuid +# for kpi_sample_type in grpc_endpoint_kpi_sample_types: +# orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) +# # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name]) +# data = {'endpoint_uuid': db_endpoint_pk, +# 'kpi_sample_type': orm_kpi_sample_type.name, +# 'kpi_uuid': str(uuid.uuid4())} +# db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data) +# self.database.create(db_endpoint_kpi_sample_type) + +# def set_drivers(self, db_device: DeviceModel, grpc_device_drivers): +# db_device_pk = db_device.device_uuid +# for driver in grpc_device_drivers: +# orm_driver = grpc_to_enum__device_driver(driver) +# str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) +# driver_config = { +# # "driver_uuid": str(uuid.uuid4()), +# "device_uuid": db_device_pk, +# "driver": orm_driver.name +# } +# db_device_driver = DriverModel(**driver_config) +# db_device_driver.device_fk = db_device +# db_device_driver.driver = orm_driver +# +# self.database.create_or_update(db_device_driver) -def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers): - db_device_pk = db_device.device_uuid - for driver in grpc_device_drivers: - orm_driver = grpc_to_enum__device_driver(driver) - str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) - db_device_driver = DriverModel(database, str_device_driver_key) - db_device_driver.device_fk = db_device - db_device_driver.driver = orm_driver - db_device_driver.save() +# def update_config( +# self, session, db_parent_pk: str, config_name: str, +# raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]] +# ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: +# +# created = False +# +# db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none() +# if not db_config: +# db_config = ConfigModel() +# setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk) +# session.add(db_config) +# session.commit() +# created = True +# +# LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump())) +# +# db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)] +# +# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): +# if action == ORM_ConfigActionEnum.SET: +# result : Tuple[ConfigRuleModel, bool] = self.set_config_rule( +# db_config, position, resource_key, resource_value) +# db_config_rule, updated = result +# db_objects.append((db_config_rule, updated)) +# elif action == ORM_ConfigActionEnum.DELETE: +# self.delete_config_rule(db_config, resource_key) +# else: +# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' +# raise AttributeError( +# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) +# +# return db_objects +# +# def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str, +# ): # -> Tuple[ConfigRuleModel, bool]: +# +# from src.context.service.database.Tools import fast_hasher +# str_rule_key_hash = fast_hasher(resource_key) +# str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') +# pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key)) +# data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position, +# 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value} +# to_add = ConfigRuleModel(**data) +# +# result, updated = self.database.create_or_update(to_add) +# return result, updated +# +# def delete_config_rule( +# self, db_config: ConfigModel, resource_key: str +# ) -> None: +# +# from src.context.service.database.Tools import fast_hasher +# str_rule_key_hash = fast_hasher(resource_key) +# str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') +# +# db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) +# +# if db_config_rule is None: +# return +# db_config_rule.delete() +# +# def delete_all_config_rules(self, db_config: ConfigModel) -> None: +# +# db_config_rule_pks = db_config.references(ConfigRuleModel) +# for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() +# +# """ +# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): +# if action == ORM_ConfigActionEnum.SET: +# result: Tuple[ConfigRuleModel, bool] = set_config_rule( +# database, db_config, position, resource_key, resource_value) +# db_config_rule, updated = result +# db_objects.append((db_config_rule, updated)) +# elif action == ORM_ConfigActionEnum.DELETE: +# delete_config_rule(database, db_config, resource_key) +# else: +# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' +# raise AttributeError( +# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) +# +# return db_objects +# """ diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py index 38214aa9b..a8d3c2c69 100644 --- a/src/context/service/database/EndPointModel.py +++ b/src/context/service/database/EndPointModel.py @@ -12,93 +12,63 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging -from typing import Dict, List, Optional, Tuple -from common.orm.Database import Database -from common.orm.HighLevel import get_object -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import EndPointId -from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type -from sqlalchemy import Column, ForeignKey, String, Enum, ForeignKeyConstraint -from sqlalchemy.dialects.postgresql import UUID -from context.service.database._Base import Base +import enum, functools +from typing import Dict +from sqlalchemy import Column, String, Enum, ForeignKeyConstraint +from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship -LOGGER = logging.getLogger(__name__) +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from ._Base import _Base +from .Tools import grpc_to_enum -class EndPointModel(Base): - __tablename__ = 'EndPoint' - topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid"), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid", ondelete='CASCADE'), primary_key=True) - endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - endpoint_type = Column(String) +class ORM_KpiSampleTypeEnum(enum.Enum): + UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN + PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED + PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED + BYTES_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED + BYTES_RECEIVED = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED + +grpc_to_enum__kpi_sample_type = functools.partial( + grpc_to_enum, KpiSampleType, ORM_KpiSampleTypeEnum) - # Relationships - kpi_sample_types = relationship("KpiSampleTypeModel", passive_deletes=True, back_populates="EndPoint") - device = relationship("DeviceModel", back_populates="endpoints") +class EndPointModel(_Base): + __tablename__ = 'endpoint' + context_uuid = Column(UUID(as_uuid=False), primary_key=True) + topology_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), primary_key=True) + endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) + endpoint_type = Column(String) + kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1)) - @staticmethod - def main_pk_name(): - return 'endpoint_uuid' + __table_args__ = ( + ForeignKeyConstraint( + ['context_uuid', 'topology_uuid'], + ['topology.context_uuid', 'topology.topology_uuid'], + ondelete='CASCADE'), + ForeignKeyConstraint( + ['device_uuid'], + ['device.device_uuid'], + ondelete='CASCADE'), + ) - def delete(self) -> None: - for db_kpi_sample_type_pk,_ in self.references(KpiSampleTypeModel): - KpiSampleTypeModel(self.database, db_kpi_sample_type_pk).delete() - super().delete() + topology = relationship('TopologyModel', back_populates='endpoints') + device = relationship('DeviceModel', back_populates='endpoints') def dump_id(self) -> Dict: result = { + 'topology_id': self.topology.dump_id(), 'device_id': self.device.dump_id(), 'endpoint_uuid': {'uuid': self.endpoint_uuid}, } return result - def dump_kpi_sample_types(self) -> List[int]: - # db_kpi_sample_type_pks = self.references(KpiSampleTypeModel) - # return [KpiSampleTypeModel(self.database, pk).dump() for pk,_ in db_kpi_sample_type_pks] - response = [] - for a in self.kpi_sample_types: - response.append(a.dump()) - return response - - def dump( # pylint: disable=arguments-differ - self, include_kpi_sample_types=True - ) -> Dict: - result = { - 'endpoint_id': self.dump_id(), - 'endpoint_type': self.endpoint_type, - } - if include_kpi_sample_types: result['kpi_sample_types'] = self.dump_kpi_sample_types() - return result - - -class KpiSampleTypeModel(Base): # pylint: disable=abstract-method - __tablename__ = 'KpiSampleType' - kpi_uuid = Column(UUID(as_uuid=False), primary_key=True) - endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid", ondelete='CASCADE')) - kpi_sample_type = Column(Enum(ORM_KpiSampleTypeEnum, create_constraint=False, - native_enum=False)) - # __table_args__ = (ForeignKeyConstraint([endpoint_uuid], [EndPointModel.endpoint_uuid]), {}) - - # Relationships - EndPoint = relationship("EndPointModel", passive_deletes=True, back_populates="kpi_sample_types") - def dump(self) -> Dict: - return self.kpi_sample_type.value - - def main_pk_name(self): - return 'kpi_uuid' + return { + 'endpoint_id' : self.dump_id(), + 'endpoint_type' : self.endpoint_type, + 'kpi_sample_types': [kst.value for kst in self.kpi_sample_types], + } -""" -def set_kpi_sample_types(database : Database, db_endpoint : EndPointModel, grpc_endpoint_kpi_sample_types): - db_endpoint_pk = db_endpoint.pk - for kpi_sample_type in grpc_endpoint_kpi_sample_types: - orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) - str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name]) - db_endpoint_kpi_sample_type = KpiSampleTypeModel(database, str_endpoint_kpi_sample_type_key) - db_endpoint_kpi_sample_type.endpoint_fk = db_endpoint - db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type - db_endpoint_kpi_sample_type.save() -""" # def get_endpoint( # database : Database, grpc_endpoint_id : EndPointId, # validate_topology_exists : bool = True, validate_device_in_topology : bool = True diff --git a/src/context/service/database/KpiSampleType.py b/src/context/service/database/KpiSampleType.py deleted file mode 100644 index 7f122f185..000000000 --- a/src/context/service/database/KpiSampleType.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import enum -from common.proto.kpi_sample_types_pb2 import KpiSampleType -from .Tools import grpc_to_enum - -class ORM_KpiSampleTypeEnum(enum.Enum): - UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN - PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED - PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED - BYTES_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED - BYTES_RECEIVED = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED - -grpc_to_enum__kpi_sample_type = functools.partial( - grpc_to_enum, KpiSampleType, ORM_KpiSampleTypeEnum) diff --git a/src/context/service/database/RelationModels.py b/src/context/service/database/RelationModels.py index 61e05db0e..bcf85d005 100644 --- a/src/context/service/database/RelationModels.py +++ b/src/context/service/database/RelationModels.py @@ -13,39 +13,39 @@ # limitations under the License. import logging -from sqlalchemy import Column, ForeignKey +from sqlalchemy import Column, ForeignKey, ForeignKeyConstraint from sqlalchemy.dialects.postgresql import UUID -from context.service.database._Base import Base +from sqlalchemy.orm import relationship +from context.service.database._Base import _Base LOGGER = logging.getLogger(__name__) -# -# class ConnectionSubServiceModel(Model): # pylint: disable=abstract-method + +# class ConnectionSubServiceModel(Model): # pk = PrimaryKeyField() # connection_fk = ForeignKeyField(ConnectionModel) # sub_service_fk = ForeignKeyField(ServiceModel) # -class LinkEndPointModel(Base): # pylint: disable=abstract-method - __tablename__ = 'LinkEndPoint' - # uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) - endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"), primary_key=True) - - @staticmethod - def main_pk_name(): - return 'endpoint_uuid' - +#class LinkEndPointModel(Base): +# __tablename__ = 'LinkEndPoint' +# # uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) +# endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"), primary_key=True) # -# class ServiceEndPointModel(Model): # pylint: disable=abstract-method +# @staticmethod +# def main_pk_name(): +# return 'endpoint_uuid' +# +# class ServiceEndPointModel(Model): # pk = PrimaryKeyField() # service_fk = ForeignKeyField(ServiceModel) # endpoint_fk = ForeignKeyField(EndPointModel) # -# class SliceEndPointModel(Model): # pylint: disable=abstract-method +# class SliceEndPointModel(Model): # pk = PrimaryKeyField() # slice_fk = ForeignKeyField(SliceModel) # endpoint_fk = ForeignKeyField(EndPointModel) # -# class SliceServiceModel(Model): # pylint: disable=abstract-method +# class SliceServiceModel(Model): # pk = PrimaryKeyField() # slice_fk = ForeignKeyField(SliceModel) # service_fk = ForeignKeyField(ServiceMo# pylint: disable=abstract-method @@ -55,26 +55,32 @@ class LinkEndPointModel(Base): # pylint: disable=abstract-method # endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) #del) # -# class SliceSubSliceModel(Model): # pylint: disable=abstract-method +# class SliceSubSliceModel(Model): # pk = PrimaryKeyField() # slice_fk = ForeignKeyField(SliceModel) # sub_slice_fk = ForeignKeyField(SliceModel) -class TopologyDeviceModel(Base): # pylint: disable=abstract-method - __tablename__ = 'TopologyDevice' - # uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid")) - device_uuid = Column(UUID(as_uuid=False), ForeignKey("Device.device_uuid"), primary_key=True) +class TopologyDeviceModel(_Base): + __tablename__ = 'topology_device' + context_uuid = Column(UUID(as_uuid=False), primary_key=True) + topology_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), primary_key=True) - @staticmethod - def main_pk_name(): - return 'device_uuid' -# -class TopologyLinkModel(Base): # pylint: disable=abstract-method - __tablename__ = 'TopologyLink' - topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid")) - link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid"), primary_key=True) + topologies = relationship('TopologyModel', back_populates='topology_device') + devices = relationship('DeviceModel', back_populates='topology_device') + + __table_args__ = ( + ForeignKeyConstraint( + ['context_uuid', 'topology_uuid'], + ['topology.context_uuid', 'topology.topology_uuid'], + ondelete='CASCADE'), + ForeignKeyConstraint( + ['device_uuid'], + ['device.device_uuid'], + ondelete='CASCADE'), + ) - @staticmethod - def main_pk_name(): - return 'link_uuid' \ No newline at end of file +#class TopologyLinkModel(Base): +# __tablename__ = 'TopologyLink' +# topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid")) +# link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid"), primary_key=True) diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py index 102e3ae3f..57fe1b347 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/TopologyModel.py @@ -12,40 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging #, operator -from typing import Dict #, List -from sqlalchemy import Column, ForeignKey +from typing import Dict +from sqlalchemy import Column, Float, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from ._Base import _Base -LOGGER = logging.getLogger(__name__) - class TopologyModel(_Base): - __tablename__ = 'Topology' + __tablename__ = 'topology' context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid'), primary_key=True) topology_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + topology_name = Column(String(), nullable=False) + created_at = Column(Float) # Relationships - context = relationship('ContextModel', back_populates='topology') + context = relationship('ContextModel', back_populates='topology') + topology_device = relationship('TopologyDeviceModel', back_populates='topologies') + #topology_link = relationship('TopologyLinkModel', back_populates='topology') + endpoints = relationship('EndPointModel', back_populates='topology') def dump_id(self) -> Dict: - context_id = self.context.dump_id() return { - 'context_id': context_id, + 'context_id': self.context.dump_id(), 'topology_uuid': {'uuid': self.topology_uuid}, } - #@staticmethod - #def main_pk_name() -> str: - # return 'topology_uuid' - def dump(self) -> Dict: - # pylint: disable=arguments-differ - result = {'topology_id': self.dump_id()} - # params: , devices=None, links=None - #if devices: - # result['device_ids'] = [device.dump_id() for device in devices] - #if links: - # result['link_ids'] = [link.dump_id() for link in links] - return result + return { + 'topology_id': self.dump_id(), + 'name' : self.topology_name, + 'device_ids' : [{'device_uuid': {'uuid': td.device_uuid}} for td in self.topology_device], + #'link_ids' : [{'link_uuid' : {'uuid': td.link_uuid }} for td in self.topology_link ], + } diff --git a/src/context/service/database/__init__.py b/src/context/service/database/__init__.py index c4940470a..9953c8205 100644 --- a/src/context/service/database/__init__.py +++ b/src/context/service/database/__init__.py @@ -11,7 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from ._Base import _Base, rebuild_database -from .ContextModel import ContextModel -from .TopologyModel import TopologyModel diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index 32c571359..c85042d2c 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -23,7 +23,7 @@ from context.service.Database import Database from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, + Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceDriverEnum, DeviceEvent, DeviceId, DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) @@ -93,7 +93,10 @@ def context_db_mb(request) -> Tuple[Session, MessageBroker]: #msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' #LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - _db_engine = Engine().get_engine() + _db_engine = Engine.get_engine() + Engine.drop_database(_db_engine) + Engine.create_database(_db_engine) + rebuild_database(_db_engine) _msg_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) yield _db_engine, _msg_broker @@ -133,16 +136,14 @@ def context_client_grpc(context_service_grpc : ContextService): # pylint: disabl # assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) # return reply.json() -# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- +# pylint: disable=redefined-outer-name, unused-argument +def test_grpc_initialize(context_client_grpc : ContextClient) -> None: + # dummy method used to initialize fixtures, database, message broker, etc. + pass -def test_grpc_context( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[sqlalchemy.engine.Engine, MessageBroker] # pylint: disable=redefined-outer-name -) -> None: - db_engine = context_db_mb[0] +# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- - # ----- Clean the database ----------------------------------------------------------------------------------------- - rebuild_database(db_engine, drop_if_exists=True) +def test_grpc_context(context_client_grpc : ContextClient) -> None: # pylint: disable=redefined-outer-name # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- #events_collector = EventsCollector( @@ -165,14 +166,6 @@ def test_grpc_context( response = context_client_grpc.ListContexts(Empty()) assert len(response.contexts) == 0 - # ----- Dump state of database before create the object ------------------------------------------------------------ - #db_entries = database.dump_all() - #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - #for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - #LOGGER.info('-----------------------------------------------------------') - #assert len(db_entries) == 0 - # ----- Create the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetContext(Context(**CONTEXT)) assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID @@ -267,14 +260,6 @@ def test_grpc_context( assert len(response.contexts[0].service_ids) == 0 assert len(response.contexts[0].slice_ids) == 0 - # ----- Dump state of database after create/update the object ------------------------------------------------------ - #db_entries = database.dump_all() - #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - #for db_entry in db_entries: - # LOGGER.info(db_entry) - #LOGGER.info('-----------------------------------------------------------') - #assert len(db_entries) == 1 - # ----- Remove the object ------------------------------------------------------------------------------------------ context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) @@ -294,28 +279,16 @@ def test_grpc_context( # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- #events_collector.stop() - # ----- Dump state of database after remove the object ------------------------------------------------------------- - #db_entries = database.dump_all() - #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - #for db_entry in db_entries: - # LOGGER.info(db_entry) - #LOGGER.info('-----------------------------------------------------------') - #assert len(db_entries) == 0 - -""" -def test_grpc_topology( - context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_db_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_db_mb[0] - - database = Database(session) - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() +def test_grpc_topology(context_client_grpc : ContextClient) -> None: # pylint: disable=redefined-outer-name # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = True, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client_grpc.SetContext(Context(**CONTEXT)) @@ -329,72 +302,90 @@ def test_grpc_topology( with pytest.raises(grpc.RpcError) as e: context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) assert e.value.code() == grpc.StatusCode.NOT_FOUND - # assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) - assert e.value.details() == 'Topology({:s}) not found'.format(DEFAULT_TOPOLOGY_UUID) + assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) + # ----- List when the object does not exist ------------------------------------------------------------------------ response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 0 + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) assert len(response.topologies) == 0 - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 1 - # ----- Create the object ------------------------------------------------------------------------------------------ response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT) - CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT) + #CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID) + #response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY)) + #assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Check create event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=2) + #events = events_collector.get_events(block=True, count=2) + #assert isinstance(events[0], TopologyEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert isinstance(events[1], ContextEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert isinstance(events[0], TopologyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.name == '' + assert len(response.topology_ids) == 1 + assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.name == '' + assert len(response.device_ids) == 0 + assert len(response.link_ids) == 0 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # assert isinstance(events[1], ContextEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 1 + assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topologies[0].name == '' + assert len(response.topologies[0].device_ids) == 0 + assert len(response.topologies[0].link_ids) == 0 # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + new_topology_name = 'new' + TOPOLOGY_WITH_NAME = copy.deepcopy(TOPOLOGY) + TOPOLOGY_WITH_NAME['name'] = new_topology_name + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_NAME)) assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 + #event = events_collector.get_event(block=True) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # ----- Get when the object exists --------------------------------------------------------------------------------- + # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.name == new_topology_name assert len(response.device_ids) == 0 assert len(response.link_ids) == 0 - # ----- List when the object exists -------------------------------------------------------------------------------- + # ----- List when the object is modified --------------------------------------------------------------------------- response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 1 assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID @@ -404,50 +395,46 @@ def test_grpc_topology( assert len(response.topologies) == 1 assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topologies[0].name == new_topology_name assert len(response.topologies[0].device_ids) == 0 assert len(response.topologies[0].link_ids) == 0 # ----- Remove the object ------------------------------------------------------------------------------------------ context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=2) - - # assert isinstance(events[0], TopologyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # assert isinstance(events[1], ContextEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() + #event = events_collector.get_event(block=True) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 0 + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 0 -def test_grpc_device( - context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_db_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_db_mb[0] + # ----- Clean dependencies used in the test and capture related events --------------------------------------------- + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + #event = events_collector.get_event(block=True) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - database = Database(session) + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + #events_collector.stop() - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() +def test_grpc_device(context_client_grpc : ContextClient) -> None: # pylint: disable=redefined-outer-name # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = True, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client_grpc.SetContext(Context(**CONTEXT)) @@ -457,16 +444,14 @@ def test_grpc_device( assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - events = events_collector.get_events(block=True, count=2) - - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #events = events_collector.get_events(block=True, count=2) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Get when the object does not exist ------------------------------------------------------------------------- with pytest.raises(grpc.RpcError) as e: @@ -481,14 +466,6 @@ def test_grpc_device( response = context_client_grpc.ListDevices(Empty()) assert len(response.devices) == 0 - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 - # ----- Create the object ------------------------------------------------------------------------------------------ with pytest.raises(grpc.RpcError) as e: WRONG_DEVICE = copy.deepcopy(DEVICE_R1) @@ -499,6 +476,7 @@ def test_grpc_device( msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ 'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID) assert e.value.details() == msg + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) assert response.device_uuid.uuid == DEVICE_R1_UUID @@ -508,8 +486,41 @@ def test_grpc_device( # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.name == '' + assert response.device_type == 'packet-router' + #assert len(response.device_config.config_rules) == 3 + assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + assert len(response.device_drivers) == 1 + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers + #assert len(response.device_endpoints) == 3 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListDeviceIds(Empty()) + assert len(response.device_ids) == 1 + assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.ListDevices(Empty()) + assert len(response.devices) == 1 + assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.devices[0].name == '' + assert response.devices[0].device_type == 'packet-router' + #assert len(response.devices[0].device_config.config_rules) == 3 + assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + assert len(response.devices[0].device_drivers) == 1 + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers + #assert len(response.devices[0].device_endpoints) == 3 + # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + new_device_name = 'r1' + new_device_driver = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED + DEVICE_UPDATED = copy.deepcopy(DEVICE_R1) + DEVICE_UPDATED['name'] = new_device_name + DEVICE_UPDATED['device_operational_status'] = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + DEVICE_UPDATED['device_drivers'].append(new_device_driver) + response = context_client_grpc.SetDevice(Device(**DEVICE_UPDATED)) assert response.device_uuid.uuid == DEVICE_R1_UUID # ----- Check update event ----------------------------------------------------------------------------------------- @@ -518,24 +529,19 @@ def test_grpc_device( # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 47 - - # ----- Get when the object exists --------------------------------------------------------------------------------- + # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.name == 'r1' assert response.device_type == 'packet-router' - assert len(response.device_config.config_rules) == 3 - assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - assert len(response.device_drivers) == 1 - assert len(response.device_endpoints) == 3 + #assert len(response.device_config.config_rules) == 3 + assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + assert len(response.device_drivers) == 2 + assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.device_drivers + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers + #assert len(response.device_endpoints) == 3 - # ----- List when the object exists -------------------------------------------------------------------------------- + # ----- List when the object is modified --------------------------------------------------------------------------- response = context_client_grpc.ListDeviceIds(Empty()) assert len(response.device_ids) == 1 assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID @@ -543,11 +549,14 @@ def test_grpc_device( response = context_client_grpc.ListDevices(Empty()) assert len(response.devices) == 1 assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.devices[0].name == 'r1' assert response.devices[0].device_type == 'packet-router' - assert len(response.devices[0].device_config.config_rules) == 3 - assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - assert len(response.devices[0].device_drivers) == 1 - assert len(response.devices[0].device_endpoints) == 3 + #assert len(response.devices[0].device_config.config_rules) == 3 + assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + assert len(response.devices[0].device_drivers) == 2 + assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.devices[0].device_drivers + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers + #assert len(response.devices[0].device_endpoints) == 3 # ----- Create object relation ------------------------------------------------------------------------------------- TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) @@ -571,15 +580,7 @@ def test_grpc_device( assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID assert len(response.link_ids) == 0 - # ----- Dump state of database after creating the object relation -------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 47 - - # ----- Remove the object -------------------------------ro----------------------------------------------------------- + # ----- Remove the object ------------------------------------------------------------------------------------------ context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) @@ -603,15 +604,8 @@ def test_grpc_device( # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- # events_collector.stop() - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - +""" def test_grpc_link( context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name context_db_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name -- GitLab From bd291c6424648a822c6449ac4e8b54efaa37230a Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 3 Jan 2023 17:37:34 +0000 Subject: [PATCH 019/158] Common: - cosmetic changes in RPC method wrapper --- .../rpc_method_wrapper/ServiceExceptions.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/common/rpc_method_wrapper/ServiceExceptions.py b/src/common/rpc_method_wrapper/ServiceExceptions.py index e8d5c79ac..e516953c5 100644 --- a/src/common/rpc_method_wrapper/ServiceExceptions.py +++ b/src/common/rpc_method_wrapper/ServiceExceptions.py @@ -18,8 +18,7 @@ from typing import Iterable, Union class ServiceException(Exception): def __init__( self, code : grpc.StatusCode, details : str, extra_details : Union[str, Iterable[str]] = [] - ) -> None: - + ) -> None: self.code = code if isinstance(extra_details, str): extra_details = [extra_details] self.details = '; '.join(map(str, [details] + extra_details)) @@ -28,39 +27,34 @@ class ServiceException(Exception): class NotFoundException(ServiceException): def __init__( self, object_name : str, object_uuid: str, extra_details : Union[str, Iterable[str]] = [] - ) -> None: - + ) -> None: details = '{:s}({:s}) not found'.format(str(object_name), str(object_uuid)) super().__init__(grpc.StatusCode.NOT_FOUND, details, extra_details=extra_details) class AlreadyExistsException(ServiceException): def __init__( self, object_name : str, object_uuid: str, extra_details : Union[str, Iterable[str]] = None - ) -> None: - + ) -> None: details = '{:s}({:s}) already exists'.format(str(object_name), str(object_uuid)) super().__init__(grpc.StatusCode.ALREADY_EXISTS, details, extra_details=extra_details) class InvalidArgumentException(ServiceException): def __init__( self, argument_name : str, argument_value: str, extra_details : Union[str, Iterable[str]] = None - ) -> None: - + ) -> None: details = '{:s}({:s}) is invalid'.format(str(argument_name), str(argument_value)) super().__init__(grpc.StatusCode.INVALID_ARGUMENT, details, extra_details=extra_details) class OperationFailedException(ServiceException): def __init__( self, operation : str, extra_details : Union[str, Iterable[str]] = None - ) -> None: - + ) -> None: details = 'Operation({:s}) failed'.format(str(operation)) super().__init__(grpc.StatusCode.INTERNAL, details, extra_details=extra_details) class NotImplementedException(ServiceException): def __init__( self, operation : str, extra_details : Union[str, Iterable[str]] = None - ) -> None: - + ) -> None: details = 'Operation({:s}) not implemented'.format(str(operation)) super().__init__(grpc.StatusCode.UNIMPLEMENTED, details, extra_details=extra_details) -- GitLab From 22d8618260b6abdd6739ce711505c3bd4b8528c6 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 3 Jan 2023 17:40:59 +0000 Subject: [PATCH 020/158] Context component: - reorganized code spliting database models, enumerations, and operation methods - separated unitary tests per entity and defined order between them - separated unitary test for fasthasher - modev old code to separate folder --- scripts/run_tests_locally-context.sh | 7 +- src/context/requirements.in | 2 + src/context/service/ChangeFeedExample.py | 34 + src/context/service/Constants.py | 8 +- .../service/ContextServiceServicerImpl.py | 549 ++----- src/context/service/Database.py | 2 +- src/context/service/Engine.py | 3 +- src/context/service/{database => }/Events.py | 0 .../service/_old_code/_test_restapi.py | 31 + src/context/service/_old_code/test_unitary.py | 4 +- src/context/service/database/ConfigModel.py | 278 ---- src/context/service/database/DeviceModel.py | 198 --- src/context/service/database/LinkModel.py | 52 - .../service/database/methods/Context.py | 95 ++ .../service/database/methods/Device.py | 296 ++++ src/context/service/database/methods/Link.py | 120 ++ .../service/database/methods/Topology.py | 123 ++ .../service/database/methods/__init__.py | 13 + .../database/models/ConfigRuleModel.py | 44 + .../database/{ => models}/ConnectionModel.py | 10 +- .../database/{ => models}/ConstraintModel.py | 7 +- .../database/{ => models}/ContextModel.py | 12 +- .../service/database/models/DeviceModel.py | 52 + .../database/{ => models}/EndPointModel.py | 19 +- .../service/database/models/LinkModel.py | 41 + .../database/{ => models}/PolicyRuleModel.py | 0 .../database/{ => models}/RelationModels.py | 69 +- .../database/{ => models}/ServiceModel.py | 6 +- .../database/{ => models}/SliceModel.py | 4 +- .../database/{ => models}/TopologyModel.py | 12 +- .../service/database/{ => models}/_Base.py | 0 .../service/database/models/__init__.py | 13 + .../database/models/enums/ConfigAction.py | 25 + .../database/models/enums/DeviceDriver.py | 29 + .../models/enums/DeviceOperationalStatus.py | 25 + .../database/models/enums/KpiSampleType.py | 27 + .../database/models/enums/_GrpcToEnum.py | 32 + .../service/database/models/enums/__init__.py | 13 + .../{Tools.py => tools/FastHasher.py} | 27 +- .../service/database/tools/__init__.py | 13 + src/context/tests/_test_connection.py | 280 ++++ src/context/tests/_test_context.py | 160 ++ src/context/tests/_test_device.py | 199 +++ src/context/tests/_test_link.py | 189 +++ src/context/tests/_test_policy.py | 114 ++ src/context/tests/_test_service.py | 214 +++ src/context/tests/_test_slice.py | 0 src/context/tests/_test_topology.py | 166 ++ src/context/tests/conftest.py | 153 ++ src/context/tests/test_hasher.py | 47 + src/context/tests/test_unitary.py | 1384 +---------------- 51 files changed, 2785 insertions(+), 2416 deletions(-) create mode 100644 src/context/service/ChangeFeedExample.py rename src/context/service/{database => }/Events.py (100%) create mode 100644 src/context/service/_old_code/_test_restapi.py delete mode 100644 src/context/service/database/ConfigModel.py delete mode 100644 src/context/service/database/DeviceModel.py delete mode 100644 src/context/service/database/LinkModel.py create mode 100644 src/context/service/database/methods/Context.py create mode 100644 src/context/service/database/methods/Device.py create mode 100644 src/context/service/database/methods/Link.py create mode 100644 src/context/service/database/methods/Topology.py create mode 100644 src/context/service/database/methods/__init__.py create mode 100644 src/context/service/database/models/ConfigRuleModel.py rename src/context/service/database/{ => models}/ConnectionModel.py (97%) rename src/context/service/database/{ => models}/ConstraintModel.py (98%) rename src/context/service/database/{ => models}/ContextModel.py (86%) create mode 100644 src/context/service/database/models/DeviceModel.py rename src/context/service/database/{ => models}/EndPointModel.py (82%) create mode 100644 src/context/service/database/models/LinkModel.py rename src/context/service/database/{ => models}/PolicyRuleModel.py (100%) rename src/context/service/database/{ => models}/RelationModels.py (57%) rename src/context/service/database/{ => models}/ServiceModel.py (97%) rename src/context/service/database/{ => models}/SliceModel.py (98%) rename src/context/service/database/{ => models}/TopologyModel.py (77%) rename src/context/service/database/{ => models}/_Base.py (100%) create mode 100644 src/context/service/database/models/__init__.py create mode 100644 src/context/service/database/models/enums/ConfigAction.py create mode 100644 src/context/service/database/models/enums/DeviceDriver.py create mode 100644 src/context/service/database/models/enums/DeviceOperationalStatus.py create mode 100644 src/context/service/database/models/enums/KpiSampleType.py create mode 100644 src/context/service/database/models/enums/_GrpcToEnum.py create mode 100644 src/context/service/database/models/enums/__init__.py rename src/context/service/database/{Tools.py => tools/FastHasher.py} (63%) create mode 100644 src/context/service/database/tools/__init__.py create mode 100644 src/context/tests/_test_connection.py create mode 100644 src/context/tests/_test_context.py create mode 100644 src/context/tests/_test_device.py create mode 100644 src/context/tests/_test_link.py create mode 100644 src/context/tests/_test_policy.py create mode 100644 src/context/tests/_test_service.py create mode 100644 src/context/tests/_test_slice.py create mode 100644 src/context/tests/_test_topology.py create mode 100644 src/context/tests/conftest.py create mode 100644 src/context/tests/test_hasher.py diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index 61f8cee91..5b6c53aa8 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -44,8 +44,9 @@ export PYTHONPATH=/home/tfs/tfs-ctrl/src #coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ # context/tests/test_unitary.py -# --log-level=INFO -o log_cli=true -pytest --verbose --maxfail=1 --durations=0 \ - context/tests/test_unitary.py +# --log-level=INFO -o log_cli=true --durations=0 +pytest --verbose --maxfail=1 \ + context/tests/test_unitary.py \ + context/tests/test_hasher.py #kubectl --namespace $TFS_K8S_NAMESPACE delete service redis-tests diff --git a/src/context/requirements.in b/src/context/requirements.in index 6c68d692d..f5d5ccbe2 100644 --- a/src/context/requirements.in +++ b/src/context/requirements.in @@ -1,8 +1,10 @@ Flask==2.1.3 Flask-RESTful==0.3.9 psycopg2-binary==2.9.3 +pytest-depends==1.0.1 redis==4.1.2 requests==2.27.1 SQLAlchemy==1.4.40 sqlalchemy-cockroachdb==1.4.3 SQLAlchemy-Utils==0.38.3 +prettytable==3.5.0 diff --git a/src/context/service/ChangeFeedExample.py b/src/context/service/ChangeFeedExample.py new file mode 100644 index 000000000..2bd46b546 --- /dev/null +++ b/src/context/service/ChangeFeedExample.py @@ -0,0 +1,34 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: + pass + #for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): + # yield ContextEvent(**json.loads(message.content)) + #cf = ChangeFeedClient() + #ready = cf.initialize() + #if not ready: raise OperationFailedException('Initialize ChangeFeed') + #for timestamp, _, primary_key, is_delete, after in cf.get_changes('context'): + # if is_delete: + # event_type = EventTypeEnum.EVENTTYPE_REMOVE + # else: + # is_create = (timestamp - after.get('created_at')) < 1.0 + # event_type = EventTypeEnum.EVENTTYPE_CREATE if is_create else EventTypeEnum.EVENTTYPE_UPDATE + # event = { + # 'event': {'timestamp': {'timestamp': timestamp}, 'event_type': event_type}, + # 'context_id': json_context_id(primary_key[0]), + # } + # yield ContextEvent(**event) diff --git a/src/context/service/Constants.py b/src/context/service/Constants.py index 9d7c886c7..25790fe29 100644 --- a/src/context/service/Constants.py +++ b/src/context/service/Constants.py @@ -14,12 +14,16 @@ TOPIC_CONNECTION = 'connection' TOPIC_CONTEXT = 'context' -TOPIC_TOPOLOGY = 'topology' TOPIC_DEVICE = 'device' TOPIC_LINK = 'link' +TOPIC_POLICY = 'policy' TOPIC_SERVICE = 'service' TOPIC_SLICE = 'slice' +TOPIC_TOPOLOGY = 'topology' -TOPICS = {TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_TOPOLOGY, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE} +TOPICS = { + TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, + TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY +} CONSUME_TIMEOUT = 0.5 # seconds diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 2661f25c1..5075d8889 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -16,8 +16,8 @@ import grpc, json, logging, operator, sqlalchemy, threading, time, uuid from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker from sqlalchemy.dialects.postgresql import UUID, insert -from sqlalchemy_cockroachdb import run_transaction from typing import Dict, Iterator, List, Optional, Set, Tuple, Union + from common.message_broker.MessageBroker import MessageBroker #from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import ( @@ -37,6 +37,10 @@ from common.tools.object_factory.Context import json_context_id from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.ServiceExceptions import ( InvalidArgumentException, NotFoundException, OperationFailedException) +from context.service.database.methods.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set +from context.service.database.methods.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set +from context.service.database.methods.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set +from context.service.database.methods.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set #from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string #from context.service.Database import Database #from context.service.database.ConfigModel import ( @@ -44,24 +48,24 @@ from common.rpc_method_wrapper.ServiceExceptions import ( #from context.service.database.ConnectionModel import ConnectionModel, set_path #from context.service.database.ConstraintModel import ( # ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS, set_constraints) -from context.service.database.ContextModel import ContextModel -from context.service.database.DeviceModel import ( - DeviceModel, grpc_to_enum__device_operational_status, grpc_to_enum__device_driver) -from context.service.database.EndPointModel import EndPointModel, grpc_to_enum__kpi_sample_type +#from context.service.database.models.ContextModel import ContextModel +#from context.service.database.models.DeviceModel import ( +# DeviceModel, grpc_to_enum__device_operational_status, grpc_to_enum__device_driver) +#from context.service.database.models.EndPointModel import EndPointModel, grpc_to_enum__kpi_sample_type #from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types #from context.service.database.Events import notify_event #from context.service.database.LinkModel import LinkModel #from context.service.database.PolicyRuleModel import PolicyRuleModel -from context.service.database.RelationModels import TopologyDeviceModel +#from context.service.database.RelationModels import TopologyDeviceModel # ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, # SliceSubSliceModel, TopologyLinkModel) #from context.service.database.ServiceModel import ( # ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) #from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status -from context.service.database.TopologyModel import TopologyModel -#from .Constants import ( -# CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, -# TOPIC_TOPOLOGY) +#from context.service.database.TopologyModel import TopologyModel +from .Constants import ( + CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, + TOPIC_SLICE, TOPIC_TOPOLOGY) #from .ChangeFeedClient import ChangeFeedClient LOGGER = logging.getLogger(__name__) @@ -84,508 +88,148 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer def __init__(self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker) -> None: LOGGER.debug('Creating Servicer...') self.db_engine = db_engine - #self.lock = threading.Lock() - #session = sessionmaker(bind=db_engine, expire_on_commit=False) - #self.session = session - #self.database = Database(session) self.messagebroker = messagebroker LOGGER.debug('Servicer Created') + def _get_metrics(self): return METRICS + + # ----- Context ---------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) def ListContextIds(self, request : Empty, context : grpc.ServicerContext) -> ContextIdList: - def callback(session : Session) -> List[Dict]: - obj_list : List[ContextModel] = session.query(ContextModel).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - return [obj.dump_id() for obj in obj_list] - return ContextIdList(context_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) + return context_list_ids(self.db_engine) @safe_and_metered_rpc_method(METRICS, LOGGER) def ListContexts(self, request : Empty, context : grpc.ServicerContext) -> ContextList: - def callback(session : Session) -> List[Dict]: - obj_list : List[ContextModel] = session.query(ContextModel).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - return [obj.dump() for obj in obj_list] - return ContextList(contexts=run_transaction(sessionmaker(bind=self.db_engine), callback)) + return context_list_objs(self.db_engine) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContext(self, request : ContextId, context : grpc.ServicerContext) -> Context: - context_uuid = request.context_uuid.uuid - def callback(session : Session) -> Optional[Dict]: - obj : Optional[ContextModel] = session.query(ContextModel)\ - .filter_by(context_uuid=context_uuid).one_or_none() - return None if obj is None else obj.dump() - obj = run_transaction(sessionmaker(bind=self.db_engine), callback) - if obj is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - return Context(**obj) + return context_get(self.db_engine, request) @safe_and_metered_rpc_method(METRICS, LOGGER) def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId: - context_uuid = request.context_id.context_uuid.uuid - context_name = request.name - - for i, topology_id in enumerate(request.topology_ids): - topology_context_uuid = topology_id.context_id.context_uuid.uuid - if topology_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) - - for i, service_id in enumerate(request.service_ids): - service_context_uuid = service_id.context_id.context_uuid.uuid - if service_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) - - for i, slice_id in enumerate(request.slice_ids): - slice_context_uuid = slice_id.context_id.context_uuid.uuid - if slice_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.slice_ids[{:d}].context_id.context_uuid.uuid'.format(i), slice_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) - - def callback(session : Session) -> Tuple[Optional[Dict], bool]: - obj : Optional[ContextModel] = session.query(ContextModel).with_for_update()\ - .filter_by(context_uuid=context_uuid).one_or_none() - is_update = obj is not None - if is_update: - obj.context_name = context_name - session.merge(obj) - else: - session.add(ContextModel(context_uuid=context_uuid, context_name=context_name, created_at=time.time())) - obj : Optional[ContextModel] = session.query(ContextModel)\ - .filter_by(context_uuid=context_uuid).one_or_none() - return (None if obj is None else obj.dump_id()), is_update - - obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) - if obj_id is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - + updated = context_set(self.db_engine, request) #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': obj_id}) - return ContextId(**obj_id) + #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': request.context_id}) + return request.context_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty: - context_uuid = request.context_uuid.uuid - - def callback(session : Session) -> bool: - num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() - return num_deleted > 0 - - deleted = run_transaction(sessionmaker(bind=self.db_engine), callback) + deleted = context_delete(self.db_engine, request) #if deleted: # notify_event(self.messagebroker, TOPIC_CONTEXT, EventTypeEnum.EVENTTYPE_REMOVE, {'context_id': request}) return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: - pass - #for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): - # yield ContextEvent(**json.loads(message.content)) - #cf = ChangeFeedClient() - #ready = cf.initialize() - #if not ready: raise OperationFailedException('Initialize ChangeFeed') - #for timestamp, _, primary_key, is_delete, after in cf.get_changes('context'): - # if is_delete: - # event_type = EventTypeEnum.EVENTTYPE_REMOVE - # else: - # is_create = (timestamp - after.get('created_at')) < 1.0 - # event_type = EventTypeEnum.EVENTTYPE_CREATE if is_create else EventTypeEnum.EVENTTYPE_UPDATE - # event = { - # 'event': {'timestamp': {'timestamp': timestamp}, 'event_type': event_type}, - # 'context_id': json_context_id(primary_key[0]), - # } - # yield ContextEvent(**event) + for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): + yield ContextEvent(**json.loads(message.content)) + # ----- Topology --------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) def ListTopologyIds(self, request : ContextId, context : grpc.ServicerContext) -> TopologyIdList: - context_uuid = request.context_uuid.uuid - def callback(session : Session) -> List[Dict]: - obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - return [obj.dump_id() for obj in obj_list] - return TopologyIdList(topology_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) + return topology_list_ids(self.db_engine, request) @safe_and_metered_rpc_method(METRICS, LOGGER) def ListTopologies(self, request : ContextId, context : grpc.ServicerContext) -> TopologyList: - context_uuid = request.context_uuid.uuid - def callback(session : Session) -> List[Dict]: - obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - return [obj.dump() for obj in obj_list] - return TopologyList(topologies=run_transaction(sessionmaker(bind=self.db_engine), callback)) + return topology_list_objs(self.db_engine, request) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Topology: - context_uuid = request.context_id.context_uuid.uuid - topology_uuid = request.topology_uuid.uuid - - def callback(session : Session) -> Optional[Dict]: - obj : Optional[TopologyModel] = session.query(TopologyModel)\ - .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).one_or_none() - return None if obj is None else obj.dump() - obj = run_transaction(sessionmaker(bind=self.db_engine), callback) - if obj is None: - obj_uuid = '{:s}/{:s}'.format(context_uuid, topology_uuid) - raise NotFoundException(TopologyModel.__name__.replace('Model', ''), obj_uuid) - return Topology(**obj) + return topology_get(self.db_engine, request) @safe_and_metered_rpc_method(METRICS, LOGGER) def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId: - context_uuid = request.topology_id.context_id.context_uuid.uuid - topology_uuid = request.topology_id.topology_uuid.uuid - topology_name = request.name - - devices_to_add : List[str] = [ - {'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'device_uuid': device_id.device_uuid.uuid} - for device_id in request.device_ids - ] - links_to_add : List[str] = [ - {'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'link_uuid': link_id.link_uuid.uuid} - for link_id in request.link_ids - ] - print('devices_to_add', devices_to_add) - - def callback(session : Session) -> Tuple[Optional[Dict], bool]: - topology_data = [{ - 'context_uuid' : context_uuid, - 'topology_uuid': topology_uuid, - 'topology_name': topology_name, - 'created_at' : time.time(), - }] - stmt = insert(TopologyModel).values(topology_data) - stmt = stmt.on_conflict_do_update( - index_elements=[TopologyModel.context_uuid, TopologyModel.topology_uuid], - set_=dict(topology_name = stmt.excluded.topology_name) - ) - session.execute(stmt) - - if len(devices_to_add) > 0: - session.execute(insert(TopologyDeviceModel).values(devices_to_add).on_conflict_do_nothing( - index_elements=[ - TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, - TopologyDeviceModel.device_uuid - ] - )) - - #if len(link_to_add) > 0: - # session.execute(insert(TopologyLinkModel).values(link_to_add).on_conflict_do_nothing( - # index_elements=[ - # TopologyLinkModel.context_uuid, TopologyLinkModel.topology_uuid, - # TopologyLinkModel.link_uuid - # ] - # )) - - is_update = True - obj : Optional[TopologyModel] = session.query(TopologyModel)\ - .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).one_or_none() - return (None if obj is None else obj.dump_id()), is_update - - obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) - if obj_id is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid) - + updated = topology_set(self.db_engine, request) #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': obj_id}) - return TopologyId(**obj_id) + #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': request.topology_id}) + return request.topology_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: - context_uuid = request.context_id.context_uuid.uuid - topology_uuid = request.topology_uuid.uuid - - def callback(session : Session) -> bool: - num_deleted = session.query(TopologyModel)\ - .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).delete() - return num_deleted > 0 - - deleted = run_transaction(sessionmaker(bind=self.db_engine), callback) + deleted = topology_delete(self.db_engine, request) #if deleted: # notify_event(self.messagebroker, TOPIC_TOPOLOGY, EventTypeEnum.EVENTTYPE_REMOVE, {'topology_id': request}) return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetTopologyEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: - pass - #for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): - # yield TopologyEvent(**json.loads(message.content)) + for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): + yield TopologyEvent(**json.loads(message.content)) + # ----- Device ----------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS, LOGGER) def ListDeviceIds(self, request : Empty, context : grpc.ServicerContext) -> DeviceIdList: - def callback(session : Session) -> List[Dict]: - obj_list : List[DeviceModel] = session.query(DeviceModel).all() - #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - return [obj.dump_id() for obj in obj_list] - return DeviceIdList(device_ids=run_transaction(sessionmaker(bind=self.db_engine), callback)) + return device_list_ids(self.db_engine) @safe_and_metered_rpc_method(METRICS, LOGGER) def ListDevices(self, request : Empty, context : grpc.ServicerContext) -> DeviceList: - def callback(session : Session) -> List[Dict]: - obj_list : List[DeviceModel] = session.query(DeviceModel).all() - #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() - return [obj.dump() for obj in obj_list] - return DeviceList(devices=run_transaction(sessionmaker(bind=self.db_engine), callback)) + return device_list_objs(self.db_engine) @safe_and_metered_rpc_method(METRICS, LOGGER) def GetDevice(self, request : ContextId, context : grpc.ServicerContext) -> Device: - device_uuid = request.device_uuid.uuid - def callback(session : Session) -> Optional[Dict]: - obj : Optional[DeviceModel] = session.query(DeviceModel)\ - .filter_by(device_uuid=device_uuid).one_or_none() - return None if obj is None else obj.dump() - obj = run_transaction(sessionmaker(bind=self.db_engine), callback) - if obj is None: raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) - return Device(**obj) + return device_get(self.db_engine, request) @safe_and_metered_rpc_method(METRICS, LOGGER) def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: - device_uuid = request.device_id.device_uuid.uuid - device_name = request.name - device_type = request.device_type - oper_status = grpc_to_enum__device_operational_status(request.device_operational_status) - device_drivers = [grpc_to_enum__device_driver(d) for d in request.device_drivers] - - related_topology_uuids : Set[Tuple[str, str]] = set() - endpoints_data : List[Dict] = list() - for i, endpoint in enumerate(request.device_endpoints): - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid - if device_uuid != endpoint_device_uuid: - raise InvalidArgumentException( - 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, - ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) - - endpoint_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid - - kpi_sample_types = [grpc_to_enum__kpi_sample_type(kst) for kst in endpoint.kpi_sample_types] + updated = device_set(self.db_engine, request) + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': request.device_id}) + return request.device_id - endpoints_data.append({ - 'context_uuid' : endpoint_context_uuid, - 'topology_uuid' : endpoint_topology_uuid, - 'device_uuid' : endpoint_device_uuid, - 'endpoint_uuid' : endpoint.endpoint_id.endpoint_uuid.uuid, - 'endpoint_type' : endpoint.endpoint_type, - 'kpi_sample_types': kpi_sample_types, - }) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: + deleted = device_delete(self.db_engine, request) + #if deleted: + # notify_event(self.messagebroker, TOPIC_DEVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'device_id': request}) + return Empty() - if len(endpoint_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - related_topology_uuids.add({ - 'context_uuid': endpoint_context_uuid, - 'topology_uuid': endpoint_topology_uuid, - 'device_uuid': endpoint_device_uuid, - }) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: + for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): + yield DeviceEvent(**json.loads(message.content)) - def callback(session : Session) -> Tuple[Optional[Dict], bool]: - obj : Optional[DeviceModel] = session.query(DeviceModel).with_for_update()\ - .filter_by(device_uuid=device_uuid).one_or_none() - is_update = obj is not None - if is_update: - obj.device_name = device_name - obj.device_type = device_type - obj.device_operational_status = oper_status - obj.device_drivers = device_drivers - session.merge(obj) - else: - session.add(DeviceModel( - device_uuid=device_uuid, device_name=device_name, device_type=device_type, - device_operational_status=oper_status, device_drivers=device_drivers, created_at=time.time())) - obj : Optional[DeviceModel] = session.query(DeviceModel)\ - .filter_by(device_uuid=device_uuid).one_or_none() - stmt = insert(EndPointModel).values(endpoints_data) - stmt = stmt.on_conflict_do_update( - index_elements=[ - EndPointModel.context_uuid, EndPointModel.topology_uuid, EndPointModel.device_uuid, - EndPointModel.endpoint_uuid - ], - set_=dict( - endpoint_type = stmt.excluded.endpoint_type, - kpi_sample_types = stmt.excluded.kpi_sample_types, - ) - ) - session.execute(stmt) + # ----- Link ------------------------------------------------------------------------------------------------------- - session.execute(insert(TopologyDeviceModel).values(list(related_topology_uuids)).on_conflict_do_nothing( - index_elements=[ - TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, - TopologyDeviceModel.device_uuid - ] - )) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: + return link_list_ids(self.db_engine) - return (None if obj is None else obj.dump_id()), is_update + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: + return link_list_objs(self.db_engine) - obj_id,updated = run_transaction(sessionmaker(bind=self.db_engine), callback) - if obj_id is None: raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: + return link_get(self.db_engine, request) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: + updated = link_set(self.db_engine, request) #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': obj_id}) - return DeviceId(**obj_id) - -# with self.session() as session: -# config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) -# running_config_result = self.update_config(session, device_uuid, 'device', config_rules) -# db_running_config = running_config_result[0][0] -# config_uuid = db_running_config.config_uuid -# running_config_rules = update_config( -# self.database, device_uuid, 'device', request.device_config.config_rules) -# db_running_config = running_config_rules[0][0] -# -# new_obj = DeviceModel(**{ -# 'device_uuid' : device_uuid, -# 'device_type' : request.device_type, -# 'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status), -# 'device_config_uuid' : config_uuid, -# }) -# result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj) -# db_device, updated = result -# -# self.set_drivers(db_device, request.device_drivers) -# -# + #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': request.link_id}) + return request.link_id @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: - device_uuid = request.device_uuid.uuid - def callback(session : Session) -> bool: - session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() - num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() - #db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() - #session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() - #session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() - #session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() - return num_deleted > 0 - deleted = run_transaction(sessionmaker(bind=self.db_engine), callback) + def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: + deleted = link_delete(self.db_engine, request) #if deleted: - # notify_event(self.messagebroker, TOPIC_DEVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'device_id': request}) + # notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: - pass - #for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): - # yield DeviceEvent(**json.loads(message.content)) + def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: + for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): + yield LinkEvent(**json.loads(message.content)) -# # ----- Link ------------------------------------------------------------------------------------------------------- -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: -# with self.session() as session: -# result = session.query(LinkModel).all() -# return LinkIdList(link_ids=[db_link.dump_id() for db_link in result]) -# -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: -# with self.session() as session: -# link_list = LinkList() -# -# db_links = session.query(LinkModel).all() -# -# for db_link in db_links: -# link_uuid = db_link.link_uuid -# filt = {'link_uuid': link_uuid} -# link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() -# if link_endpoints: -# eps = [] -# for lep in link_endpoints: -# filt = {'endpoint_uuid': lep.endpoint_uuid} -# eps.append(session.query(EndPointModel).filter_by(**filt).one()) -# link_list.links.append(Link(**db_link.dump(eps))) -# -# return link_list -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: -# link_uuid = request.link_uuid.uuid -# with self.session() as session: -# result = session.query(LinkModel).filter(LinkModel.link_uuid == link_uuid).one_or_none() -# if not result: -# raise NotFoundException(LinkModel.__name__.replace('Model', ''), link_uuid) -# -# filt = {'link_uuid': link_uuid} -# link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all() -# if link_endpoints: -# eps = [] -# for lep in link_endpoints: -# filt = {'endpoint_uuid': lep.endpoint_uuid} -# eps.append(session.query(EndPointModel).filter_by(**filt).one()) -# return Link(**result.dump(eps)) -# -# rd = result.dump() -# rt = Link(**rd) -# -# return rt -# -# -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: -# link_uuid = request.link_id.link_uuid.uuid -# -# new_link = LinkModel(**{ -# 'link_uuid': link_uuid -# }) -# result: Tuple[LinkModel, bool] = self.database.create_or_update(new_link) -# db_link, updated = result -# -# for endpoint_id in request.link_endpoint_ids: -# endpoint_uuid = endpoint_id.endpoint_uuid.uuid -# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid -# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# -# -# db_topology = None -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid) -# # check device is in topology -# self.database.get_object(TopologyDeviceModel, endpoint_device_uuid) -# -# -# link_endpoint = LinkEndPointModel(link_uuid=link_uuid, endpoint_uuid=endpoint_uuid) -# result: Tuple[LinkEndPointModel, bool] = self.database.create_or_update(link_endpoint) -# -# if db_topology is not None: -# topology_link = TopologyLinkModel(topology_uuid=endpoint_topology_uuid, link_uuid=link_uuid) -# result: Tuple[TopologyLinkModel, bool] = self.database.create_or_update(topology_link) -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_link_id = db_link.dump_id() -# notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) -# return LinkId(**dict_link_id) -# -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: -# with self.session() as session: -# link_uuid = request.link_uuid.uuid -# -# session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() -# session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() -# -# result = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() -# if not result: -# return Empty() -# dict_link_id = result.dump_id() -# -# session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() -# session.commit() -# event_type = EventTypeEnum.EVENTTYPE_REMOVE -# notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) -# return Empty() -# -## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: -## for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): -## yield LinkEvent(**json.loads(message.content)) -# -# # # ----- Service ---------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) @@ -810,13 +454,13 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # event_type = EventTypeEnum.EVENTTYPE_REMOVE # notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) # return Empty() -# -## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: -## for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): -## yield ServiceEvent(**json.loads(message.content)) -# -# + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: + for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): + yield ServiceEvent(**json.loads(message.content)) + + # # ----- Slice ---------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) @@ -993,13 +637,13 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # event_type = EventTypeEnum.EVENTTYPE_REMOVE # notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) # return Empty() -# -## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: -## for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT): -## yield SliceEvent(**json.loads(message.content)) -# -# + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: + for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT): + yield SliceEvent(**json.loads(message.content)) + + # # ----- Connection ------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) @@ -1082,13 +726,13 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # event_type = EventTypeEnum.EVENTTYPE_REMOVE # notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) # return Empty() -# -## @safe_and_metered_rpc_method(METRICS, LOGGER) -## def GetConnectionEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: -## for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): -## yield ConnectionEvent(**json.loads(message.content)) -# -# + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetConnectionEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: + for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): + yield ConnectionEvent(**json.loads(message.content)) + + # # ----- Policy ----------------------------------------------------------------------------------------------------- # # @safe_and_metered_rpc_method(METRICS, LOGGER) @@ -1140,4 +784,3 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # #event_type = EventTypeEnum.EVENTTYPE_REMOVE # #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) # return Empty() -# \ No newline at end of file diff --git a/src/context/service/Database.py b/src/context/service/Database.py index 03598a97f..edb903a10 100644 --- a/src/context/service/Database.py +++ b/src/context/service/Database.py @@ -2,7 +2,7 @@ import logging from sqlalchemy import MetaData from sqlalchemy.orm import Session #, joinedload from typing import Tuple #, List -from context.service.database._Base import _Base +from context.service.database.models._Base import _Base #from common.orm.backend.Tools import key_to_str from common.rpc_method_wrapper.ServiceExceptions import NotFoundException diff --git a/src/context/service/Engine.py b/src/context/service/Engine.py index ec4702f27..151f33751 100644 --- a/src/context/service/Engine.py +++ b/src/context/service/Engine.py @@ -18,6 +18,7 @@ from common.Settings import get_setting LOGGER = logging.getLogger(__name__) APP_NAME = 'tfs' +ECHO = False # true: dump SQL commands and transactions executed class Engine: @staticmethod @@ -26,7 +27,7 @@ class Engine: try: engine = sqlalchemy.create_engine( - crdb_uri, connect_args={'application_name': APP_NAME}, echo=True, future=True) + crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True) except: # pylint: disable=bare-except LOGGER.exception('Failed to connect to database: {:s}'.format(crdb_uri)) return None diff --git a/src/context/service/database/Events.py b/src/context/service/Events.py similarity index 100% rename from src/context/service/database/Events.py rename to src/context/service/Events.py diff --git a/src/context/service/_old_code/_test_restapi.py b/src/context/service/_old_code/_test_restapi.py new file mode 100644 index 000000000..82a8bca40 --- /dev/null +++ b/src/context/service/_old_code/_test_restapi.py @@ -0,0 +1,31 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +#from context.service._old_code.Populate import populate +#from context.service.rest_server.RestServer import RestServer +#from context.service.rest_server.Resources import RESOURCES + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +#def do_rest_request(url : str): +# base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) +# request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) +# LOGGER.warning('Request: GET {:s}'.format(str(request_url))) +# reply = requests.get(request_url) +# LOGGER.warning('Reply: {:s}'.format(str(reply.text))) +# assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) +# return reply.json() + diff --git a/src/context/service/_old_code/test_unitary.py b/src/context/service/_old_code/test_unitary.py index 04e054aad..5a0dcb9c1 100644 --- a/src/context/service/_old_code/test_unitary.py +++ b/src/context/service/_old_code/test_unitary.py @@ -34,7 +34,7 @@ from common.type_checkers.Assertions import ( validate_topology_ids) from context.client.ContextClient import ContextClient from context.client.EventsCollector import EventsCollector -from context.service.database.Tools import ( +from context.service.database.tools.Tools import ( FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher) from context.service.grpc_server.ContextService import ContextService from context.service._old_code.Populate import populate @@ -43,7 +43,7 @@ from context.service.rest_server.Resources import RESOURCES from requests import Session from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker -from context.service.database._Base import Base +from context.service.database.models._Base import Base from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py deleted file mode 100644 index d36622e76..000000000 --- a/src/context/service/database/ConfigModel.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import enum -import functools, logging, operator -from typing import Dict, List, Optional, Tuple, Union -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import ConfigActionEnum -from common.tools.grpc.Tools import grpc_message_to_json_string -from sqlalchemy import Column, ForeignKey, INTEGER, CheckConstraint, Enum, String -from sqlalchemy.dialects.postgresql import UUID, ARRAY -from context.service.database._Base import _Base -from sqlalchemy.orm import relationship -from context.service.Database import Database - - -import functools, json, logging, operator -from enum import Enum -from typing import Dict, List, Optional, Tuple, Type, Union -from common.orm.Database import Database -from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object -from common.orm.backend.Tools import key_to_str -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.IntegerField import IntegerField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from common.proto.context_pb2 import ConfigActionEnum, ConfigRule -from common.tools.grpc.Tools import grpc_message_to_json_string -#from .EndPointModel import EndPointModel, get_endpoint -from .Tools import fast_hasher, grpc_to_enum, remove_dict_key - -LOGGER = logging.getLogger(__name__) - -class ORM_ConfigActionEnum(enum.Enum): - UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED - SET = ConfigActionEnum.CONFIGACTION_SET - DELETE = ConfigActionEnum.CONFIGACTION_DELETE - -grpc_to_enum__config_action = functools.partial( - grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum) - -class ConfigModel(Base): # pylint: disable=abstract-method - __tablename__ = 'Config' - config_uuid = Column(UUID(as_uuid=False), primary_key=True) - - # Relationships - config_rule = relationship("ConfigRuleModel", cascade="all,delete", back_populates="config", lazy='joined') - - def dump(self) -> List[Dict]: - config_rules = [] - for a in self.config_rule: - asdf = a.dump() - config_rules.append(asdf) - return [remove_dict_key(config_rule, 'position') for config_rule in config_rules] - - @staticmethod - def main_pk_name(): - return 'config_uuid' - -class ConfigRuleModel(Base): # pylint: disable=abstract-method - __tablename__ = 'ConfigRule' - config_rule_uuid = Column(UUID(as_uuid=False), primary_key=True) - config_uuid = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid", ondelete='CASCADE'), primary_key=True) - - action = Column(Enum(ORM_ConfigActionEnum, create_constraint=True, native_enum=True), nullable=False) - position = Column(INTEGER, nullable=False) - key = Column(String, nullable=False) - value = Column(String, nullable=False) - - __table_args__ = ( - CheckConstraint(position >= 0, name='check_position_value'), - {} - ) - - # Relationships - config = relationship("ConfigModel", passive_deletes=True, back_populates="config_rule") -class ConfigRuleCustomModel(Model): # pylint: disable=abstract-method - key = StringField(required=True, allow_empty=False) - value = StringField(required=True, allow_empty=False) - - def dump(self) -> Dict: # pylint: disable=arguments-differ - return {'custom': {'resource_key': self.key, 'resource_value': self.value}} - -class ConfigRuleAclModel(Model): # pylint: disable=abstract-method - # TODO: improve definition of fields in ConfigRuleAclModel - # To simplify, endpoint encoded as JSON-string directly; otherwise causes circular dependencies - #endpoint_fk = ForeignKeyField(EndPointModel) - endpoint_id = StringField(required=True, allow_empty=False) - # To simplify, ACL rule is encoded as a JSON-string directly - acl_data = StringField(required=True, allow_empty=False) - - def dump(self) -> Dict: # pylint: disable=arguments-differ - #json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id() - json_endpoint_id = json.loads(self.endpoint_id) - json_acl_rule_set = json.loads(self.acl_data) - return {'acl': {'endpoint_id': json_endpoint_id, 'rule_set': json_acl_rule_set}} - -# enum values should match name of field in ConfigRuleModel -class ConfigRuleKindEnum(Enum): - CUSTOM = 'custom' - ACL = 'acl' - -Union_SpecificConfigRule = Union[ - ConfigRuleCustomModel, ConfigRuleAclModel -] - -class ConfigRuleModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - config_fk = ForeignKeyField(ConfigModel) - kind = EnumeratedField(ConfigRuleKindEnum) - position = IntegerField(min_value=0, required=True) - action = EnumeratedField(ORM_ConfigActionEnum, required=True) - config_rule_custom_fk = ForeignKeyField(ConfigRuleCustomModel, required=False) - config_rule_acl_fk = ForeignKeyField(ConfigRuleAclModel, required=False) - - def delete(self) -> None: - field_name = 'config_rule_{:s}_fk'.format(str(self.kind.value)) - specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None) - if specific_fk_value is None: - raise Exception('Unable to find config_rule key for field_name({:s})'.format(field_name)) - specific_fk_class = getattr(ConfigRuleModel, field_name, None) - foreign_model_class : Model = specific_fk_class.foreign_model - super().delete() - get_object(self.database, foreign_model_class, str(specific_fk_value)).delete() - - def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ - field_name = 'config_rule_{:s}_fk'.format(str(self.kind.value)) - specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None) - if specific_fk_value is None: - raise Exception('Unable to find config_rule key for field_name({:s})'.format(field_name)) - specific_fk_class = getattr(ConfigRuleModel, field_name, None) - foreign_model_class : Model = specific_fk_class.foreign_model - config_rule : Union_SpecificConfigRule = get_object(self.database, foreign_model_class, str(specific_fk_value)) - result = config_rule.dump() - result['action'] = self.action.value - if include_position: result['position'] = self.position - return result - - @staticmethod - def main_pk_name(): - return 'config_rule_uuid' - -def set_config_rule( - database : Database, db_config : ConfigModel, position : int, resource_key : str, resource_value : str, -): # -> Tuple[ConfigRuleModel, bool]: - - str_rule_key_hash = fast_hasher(resource_key) - str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') - - data = {'config_fk': db_config, 'position': position, 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, - 'value': resource_value} - to_add = ConfigRuleModel(**data) - - result = database.create_or_update(to_add) - return result -Tuple_ConfigRuleSpecs = Tuple[Type, str, Dict, ConfigRuleKindEnum] - -def parse_config_rule_custom(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: - config_rule_class = ConfigRuleCustomModel - str_config_rule_id = grpc_config_rule.custom.resource_key - config_rule_data = { - 'key' : grpc_config_rule.custom.resource_key, - 'value': grpc_config_rule.custom.resource_value, - } - return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.CUSTOM - -def parse_config_rule_acl(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: - config_rule_class = ConfigRuleAclModel - grpc_endpoint_id = grpc_config_rule.acl.endpoint_id - grpc_rule_set = grpc_config_rule.acl.rule_set - device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid - endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid - str_endpoint_key = '/'.join([device_uuid, endpoint_uuid]) - #str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) - str_config_rule_id = ':'.join([str_endpoint_key, grpc_rule_set.name]) - config_rule_data = { - #'endpoint_fk': db_endpoint, - 'endpoint_id': grpc_message_to_json_string(grpc_endpoint_id), - 'acl_data': grpc_message_to_json_string(grpc_rule_set), - } - return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.ACL - -CONFIGRULE_PARSERS = { - 'custom': parse_config_rule_custom, - 'acl' : parse_config_rule_acl, -} - -Union_ConfigRuleModel = Union[ - ConfigRuleCustomModel, ConfigRuleAclModel, -] - -def set_config_rule( - database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule, position : int -) -> Tuple[Union_ConfigRuleModel, bool]: - grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) - parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) - if parser is None: - raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( - grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) - - # create specific ConfigRule - config_rule_class, str_config_rule_id, config_rule_data, config_rule_kind = parser(database, grpc_config_rule) - str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) - str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') - result : Tuple[Union_ConfigRuleModel, bool] = update_or_create_object( - database, config_rule_class, str_config_rule_key, config_rule_data) - db_specific_config_rule, updated = result - - # create generic ConfigRule - config_rule_fk_field_name = 'config_rule_{:s}_fk'.format(config_rule_kind.value) - config_rule_data = { - 'config_fk': db_config, 'kind': config_rule_kind, 'position': position, - 'action': ORM_ConfigActionEnum.SET, - config_rule_fk_field_name: db_specific_config_rule - } - result : Tuple[ConfigRuleModel, bool] = update_or_create_object( - database, ConfigRuleModel, str_config_rule_key, config_rule_data) - db_config_rule, updated = result - - return db_config_rule, updated - -def delete_config_rule( - database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule -) -> None: - grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) - parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) - if parser is None: - raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( - grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) - - # delete generic config rules; self deletes specific config rule - _, str_config_rule_id, _, config_rule_kind = parser(database, grpc_config_rule) - str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) - str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') - db_config_rule : Optional[ConfigRuleModel] = get_object( - database, ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) - if db_config_rule is None: return - db_config_rule.delete() - -def update_config( - database : Database, db_parent_pk : str, config_name : str, grpc_config_rules -) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: - - str_config_key = key_to_str([config_name, db_parent_pk], separator=':') - result : Tuple[ConfigModel, bool] = get_or_create_object(database, ConfigModel, str_config_key) - db_config, created = result - - db_objects = [(db_config, created)] - - for position,grpc_config_rule in enumerate(grpc_config_rules): - action = grpc_to_enum__config_action(grpc_config_rule.action) - - if action == ORM_ConfigActionEnum.SET: - result : Tuple[ConfigRuleModel, bool] = set_config_rule( - database, db_config, grpc_config_rule, position) - db_config_rule, updated = result - db_objects.append((db_config_rule, updated)) - elif action == ORM_ConfigActionEnum.DELETE: - delete_config_rule(database, db_config, grpc_config_rule) - else: - msg = 'Unsupported Action({:s}) for ConfigRule({:s})' - str_action = str(ConfigActionEnum.Name(action)) - str_config_rule = grpc_message_to_json_string(grpc_config_rule) - raise AttributeError(msg.format(str_action, str_config_rule)) - - return db_objects diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py deleted file mode 100644 index 5c9e27e06..000000000 --- a/src/context/service/database/DeviceModel.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum -import functools, logging -#import uuid -from typing import Dict #, List -#from common.orm.Database import Database -#from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum -from sqlalchemy import Column, Float, ForeignKey, String, Enum -from sqlalchemy.dialects.postgresql import UUID, ARRAY -from sqlalchemy.orm import relationship -from context.service.database._Base import _Base -from .Tools import grpc_to_enum - -LOGGER = logging.getLogger(__name__) - -class ORM_DeviceDriverEnum(enum.Enum): - UNDEFINED = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED - OPENCONFIG = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG - TRANSPORT_API = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API - P4 = DeviceDriverEnum.DEVICEDRIVER_P4 - IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY - ONF_TR_352 = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352 - XR = DeviceDriverEnum.DEVICEDRIVER_XR - -grpc_to_enum__device_driver = functools.partial( - grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) - -class ORM_DeviceOperationalStatusEnum(enum.Enum): - UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED - DISABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - -grpc_to_enum__device_operational_status = functools.partial( - grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum) - -class DeviceModel(_Base): - __tablename__ = 'device' - device_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_name = Column(String(), nullable=False) - device_type = Column(String(), nullable=False) - #device_config_uuid = Column(UUID(as_uuid=False), ForeignKey('config.config_uuid', ondelete='CASCADE')) - device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum)) - device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) - created_at = Column(Float) - - # Relationships - topology_device = relationship('TopologyDeviceModel', back_populates='devices') - #device_config = relationship("ConfigModel", passive_deletes=True, lazy="joined") - endpoints = relationship('EndPointModel', passive_deletes=True, back_populates='device') - - def dump_id(self) -> Dict: - return {'device_uuid': {'uuid': self.device_uuid}} - - def dump(self) -> Dict: - return { - 'device_id' : self.dump_id(), - 'name' : self.device_name, - 'device_type' : self.device_type, - 'device_operational_status': self.device_operational_status.value, - 'device_drivers' : [d.value for d in self.device_drivers], - #'device_config' : {'config_rules': self.device_config.dump()}, - #'device_endpoints' : [ep.dump() for ep in self.endpoints], - } - -#def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers): -# db_device_pk = db_device.device_uuid -# for driver in grpc_device_drivers: -# orm_driver = grpc_to_enum__device_driver(driver) -# str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) -# db_device_driver = DriverModel(database, str_device_driver_key) -# db_device_driver.device_fk = db_device -# db_device_driver.driver = orm_driver -# db_device_driver.save() - -# def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types): -# db_endpoint_pk = db_endpoint.endpoint_uuid -# for kpi_sample_type in grpc_endpoint_kpi_sample_types: -# orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) -# # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name]) -# data = {'endpoint_uuid': db_endpoint_pk, -# 'kpi_sample_type': orm_kpi_sample_type.name, -# 'kpi_uuid': str(uuid.uuid4())} -# db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data) -# self.database.create(db_endpoint_kpi_sample_type) - -# def set_drivers(self, db_device: DeviceModel, grpc_device_drivers): -# db_device_pk = db_device.device_uuid -# for driver in grpc_device_drivers: -# orm_driver = grpc_to_enum__device_driver(driver) -# str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) -# driver_config = { -# # "driver_uuid": str(uuid.uuid4()), -# "device_uuid": db_device_pk, -# "driver": orm_driver.name -# } -# db_device_driver = DriverModel(**driver_config) -# db_device_driver.device_fk = db_device -# db_device_driver.driver = orm_driver -# -# self.database.create_or_update(db_device_driver) - -# def update_config( -# self, session, db_parent_pk: str, config_name: str, -# raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]] -# ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: -# -# created = False -# -# db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none() -# if not db_config: -# db_config = ConfigModel() -# setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk) -# session.add(db_config) -# session.commit() -# created = True -# -# LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump())) -# -# db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)] -# -# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): -# if action == ORM_ConfigActionEnum.SET: -# result : Tuple[ConfigRuleModel, bool] = self.set_config_rule( -# db_config, position, resource_key, resource_value) -# db_config_rule, updated = result -# db_objects.append((db_config_rule, updated)) -# elif action == ORM_ConfigActionEnum.DELETE: -# self.delete_config_rule(db_config, resource_key) -# else: -# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' -# raise AttributeError( -# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) -# -# return db_objects -# -# def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str, -# ): # -> Tuple[ConfigRuleModel, bool]: -# -# from src.context.service.database.Tools import fast_hasher -# str_rule_key_hash = fast_hasher(resource_key) -# str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') -# pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key)) -# data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position, -# 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value} -# to_add = ConfigRuleModel(**data) -# -# result, updated = self.database.create_or_update(to_add) -# return result, updated -# -# def delete_config_rule( -# self, db_config: ConfigModel, resource_key: str -# ) -> None: -# -# from src.context.service.database.Tools import fast_hasher -# str_rule_key_hash = fast_hasher(resource_key) -# str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') -# -# db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) -# -# if db_config_rule is None: -# return -# db_config_rule.delete() -# -# def delete_all_config_rules(self, db_config: ConfigModel) -> None: -# -# db_config_rule_pks = db_config.references(ConfigRuleModel) -# for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete() -# -# """ -# for position, (action, resource_key, resource_value) in enumerate(raw_config_rules): -# if action == ORM_ConfigActionEnum.SET: -# result: Tuple[ConfigRuleModel, bool] = set_config_rule( -# database, db_config, position, resource_key, resource_value) -# db_config_rule, updated = result -# db_objects.append((db_config_rule, updated)) -# elif action == ORM_ConfigActionEnum.DELETE: -# delete_config_rule(database, db_config, resource_key) -# else: -# msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})' -# raise AttributeError( -# msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value))) -# -# return db_objects -# """ diff --git a/src/context/service/database/LinkModel.py b/src/context/service/database/LinkModel.py deleted file mode 100644 index 6b768d1b7..000000000 --- a/src/context/service/database/LinkModel.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, operator -from typing import Dict, List -from sqlalchemy import Column, ForeignKey -from sqlalchemy.dialects.postgresql import UUID -from context.service.database._Base import Base -from sqlalchemy.orm import relationship - -LOGGER = logging.getLogger(__name__) - -class LinkModel(Base): - __tablename__ = 'Link' - link_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - - @staticmethod - def main_pk_name(): - return 'link_uuid' - - def dump_id(self) -> Dict: - return {'link_uuid': {'uuid': self.link_uuid}} - - def dump_endpoint_ids(self) -> List[Dict]: - return [endpoint.dump_id() for endpoint in self.endpoints] - - def dump(self, endpoints=None) -> Dict: - result = { - 'link_id': self.dump_id() - } - if endpoints: - result['link_endpoint_ids'] = [] - for endpoint in endpoints: - dump = endpoint.dump_id() - LOGGER.info(dump) - result['link_endpoint_ids'].append(dump) - - LOGGER.info(result['link_endpoint_ids']) - - LOGGER.info(result) - return result diff --git a/src/context/service/database/methods/Context.py b/src/context/service/database/methods/Context.py new file mode 100644 index 000000000..8f1c2ee23 --- /dev/null +++ b/src/context/service/database/methods/Context.py @@ -0,0 +1,95 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional +from common.proto.context_pb2 import Context, ContextId, ContextIdList, ContextList +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from context.service.database.models.ContextModel import ContextModel + +def context_list_ids(db_engine : Engine) -> ContextIdList: + def callback(session : Session) -> List[Dict]: + obj_list : List[ContextModel] = session.query(ContextModel).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return ContextIdList(context_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + +def context_list_objs(db_engine : Engine) -> ContextList: + def callback(session : Session) -> List[Dict]: + obj_list : List[ContextModel] = session.query(ContextModel).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return ContextList(contexts=run_transaction(sessionmaker(bind=db_engine), callback)) + +def context_get(db_engine : Engine, request : ContextId) -> Context: + context_uuid = request.context_uuid.uuid + def callback(session : Session) -> Optional[Dict]: + obj : Optional[ContextModel] = session.query(ContextModel)\ + .filter_by(context_uuid=context_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: raise NotFoundException('Context', context_uuid) + return Context(**obj) + +def context_set(db_engine : Engine, request : Context) -> bool: + context_uuid = request.context_id.context_uuid.uuid + context_name = request.name + + for i, topology_id in enumerate(request.topology_ids): + topology_context_uuid = topology_id.context_id.context_uuid.uuid + if topology_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + + for i, service_id in enumerate(request.service_ids): + service_context_uuid = service_id.context_id.context_uuid.uuid + if service_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + + for i, slice_id in enumerate(request.slice_ids): + slice_context_uuid = slice_id.context_id.context_uuid.uuid + if slice_context_uuid != context_uuid: + raise InvalidArgumentException( + 'request.slice_ids[{:d}].context_id.context_uuid.uuid'.format(i), slice_context_uuid, + ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + + def callback(session : Session) -> None: + context_data = [{ + 'context_uuid': context_uuid, + 'context_name': context_name, + 'created_at' : time.time(), + }] + stmt = insert(ContextModel).values(context_data) + stmt = stmt.on_conflict_do_update( + index_elements=[ContextModel.context_uuid], + set_=dict(context_name = stmt.excluded.context_name) + ) + session.execute(stmt) + + run_transaction(sessionmaker(bind=db_engine), callback) + return False # TODO: improve and check if created/updated + +def context_delete(db_engine : Engine, request : ContextId) -> bool: + context_uuid = request.context_uuid.uuid + def callback(session : Session) -> bool: + num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/methods/Device.py b/src/context/service/database/methods/Device.py new file mode 100644 index 000000000..e7dc3dadb --- /dev/null +++ b/src/context/service/database/methods/Device.py @@ -0,0 +1,296 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from sqlalchemy import delete +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional, Set, Tuple +from common.proto.context_pb2 import Device, DeviceId, DeviceIdList, DeviceList +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.service.database.models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel +from context.service.database.models.DeviceModel import DeviceModel +from context.service.database.models.EndPointModel import EndPointModel +from context.service.database.models.RelationModels import TopologyDeviceModel +from context.service.database.models.enums.ConfigAction import grpc_to_enum__config_action +from context.service.database.models.enums.DeviceDriver import grpc_to_enum__device_driver +from context.service.database.models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status +from context.service.database.models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type + +def device_list_ids(db_engine : Engine) -> DeviceIdList: + def callback(session : Session) -> List[Dict]: + obj_list : List[DeviceModel] = session.query(DeviceModel).all() + #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return DeviceIdList(device_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + +def device_list_objs(db_engine : Engine) -> DeviceList: + def callback(session : Session) -> List[Dict]: + obj_list : List[DeviceModel] = session.query(DeviceModel).all() + #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return DeviceList(devices=run_transaction(sessionmaker(bind=db_engine), callback)) + +def device_get(db_engine : Engine, request : DeviceId) -> Device: + device_uuid = request.device_uuid.uuid + def callback(session : Session) -> Optional[Dict]: + obj : Optional[DeviceModel] = session.query(DeviceModel)\ + .filter_by(device_uuid=device_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: raise NotFoundException('Device', device_uuid) + return Device(**obj) + +def device_set(db_engine : Engine, request : Device) -> bool: + device_uuid = request.device_id.device_uuid.uuid + device_name = request.name + device_type = request.device_type + oper_status = grpc_to_enum__device_operational_status(request.device_operational_status) + device_drivers = [grpc_to_enum__device_driver(d) for d in request.device_drivers] + + topology_keys : Set[Tuple[str, str]] = set() + related_topologies : List[Dict] = list() + endpoints_data : List[Dict] = list() + for i, endpoint in enumerate(request.device_endpoints): + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid + if device_uuid != endpoint_device_uuid: + raise InvalidArgumentException( + 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, + ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) + + endpoint_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid + endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid + + kpi_sample_types = [grpc_to_enum__kpi_sample_type(kst) for kst in endpoint.kpi_sample_types] + + endpoints_data.append({ + 'context_uuid' : endpoint_context_uuid, + 'topology_uuid' : endpoint_topology_uuid, + 'device_uuid' : endpoint_device_uuid, + 'endpoint_uuid' : endpoint.endpoint_id.endpoint_uuid.uuid, + 'endpoint_type' : endpoint.endpoint_type, + 'kpi_sample_types': kpi_sample_types, + }) + + if len(endpoint_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: + topology_key = (endpoint_context_uuid, endpoint_topology_uuid) + if topology_key not in topology_keys: + related_topologies.append({ + 'context_uuid': endpoint_context_uuid, + 'topology_uuid': endpoint_topology_uuid, + 'device_uuid': endpoint_device_uuid, + }) + topology_keys.add(topology_key) + + config_rules : List[Dict] = list() + for position,config_rule in enumerate(request.device_config.config_rules): + str_kind = config_rule.WhichOneof('config_rule') + config_rules.append({ + 'device_uuid': device_uuid, + 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member + 'action' : grpc_to_enum__config_action(config_rule.action), + 'position' : position, + 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), + }) + + def callback(session : Session) -> None: + obj : Optional[DeviceModel] = session.query(DeviceModel).with_for_update()\ + .filter_by(device_uuid=device_uuid).one_or_none() + is_update = obj is not None + if is_update: + obj.device_name = device_name + obj.device_type = device_type + obj.device_operational_status = oper_status + obj.device_drivers = device_drivers + session.merge(obj) + else: + session.add(DeviceModel( + device_uuid=device_uuid, device_name=device_name, device_type=device_type, + device_operational_status=oper_status, device_drivers=device_drivers, created_at=time.time())) + obj : Optional[DeviceModel] = session.query(DeviceModel)\ + .filter_by(device_uuid=device_uuid).one_or_none() + + stmt = insert(EndPointModel).values(endpoints_data) + stmt = stmt.on_conflict_do_update( + index_elements=[ + EndPointModel.context_uuid, EndPointModel.topology_uuid, EndPointModel.device_uuid, + EndPointModel.endpoint_uuid + ], + set_=dict( + endpoint_type = stmt.excluded.endpoint_type, + kpi_sample_types = stmt.excluded.kpi_sample_types, + ) + ) + session.execute(stmt) + + session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing( + index_elements=[ + TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, + TopologyDeviceModel.device_uuid + ] + )) + + session.execute(delete(ConfigRuleModel).where(ConfigRuleModel.device_uuid == device_uuid)) + session.execute(insert(ConfigRuleModel).values(config_rules)) + + run_transaction(sessionmaker(bind=db_engine), callback) + return False # TODO: improve and check if created/updated + +def device_delete(db_engine : Engine, request : DeviceId) -> bool: + device_uuid = request.device_uuid.uuid + def callback(session : Session) -> bool: + session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() + num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() + #db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() + #session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() + #session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() + #session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) + + + + +#Union_SpecificConfigRule = Union[ +# ConfigRuleCustomModel, ConfigRuleAclModel +#] +# +#def set_config_rule( +# database : Database, db_config : ConfigModel, position : int, resource_key : str, resource_value : str, +#): # -> Tuple[ConfigRuleModel, bool]: +# +# str_rule_key_hash = fast_hasher(resource_key) +# str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') +# +# data = {'config_fk': db_config, 'position': position, 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, +# 'value': resource_value} +# to_add = ConfigRuleModel(**data) +# +# result = database.create_or_update(to_add) +# return result +#Tuple_ConfigRuleSpecs = Tuple[Type, str, Dict, ConfigRuleKindEnum] +# +#def parse_config_rule_custom(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: +# config_rule_class = ConfigRuleCustomModel +# str_config_rule_id = grpc_config_rule.custom.resource_key +# config_rule_data = { +# 'key' : grpc_config_rule.custom.resource_key, +# 'value': grpc_config_rule.custom.resource_value, +# } +# return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.CUSTOM +# +#def parse_config_rule_acl(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: +# config_rule_class = ConfigRuleAclModel +# grpc_endpoint_id = grpc_config_rule.acl.endpoint_id +# grpc_rule_set = grpc_config_rule.acl.rule_set +# device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid +# endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid +# str_endpoint_key = '/'.join([device_uuid, endpoint_uuid]) +# #str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) +# str_config_rule_id = ':'.join([str_endpoint_key, grpc_rule_set.name]) +# config_rule_data = { +# #'endpoint_fk': db_endpoint, +# 'endpoint_id': grpc_message_to_json_string(grpc_endpoint_id), +# 'acl_data': grpc_message_to_json_string(grpc_rule_set), +# } +# return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.ACL +# +#CONFIGRULE_PARSERS = { +# 'custom': parse_config_rule_custom, +# 'acl' : parse_config_rule_acl, +#} +# +#Union_ConfigRuleModel = Union[ +# ConfigRuleCustomModel, ConfigRuleAclModel, +#] +# +#def set_config_rule( +# database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule, position : int +#) -> Tuple[Union_ConfigRuleModel, bool]: +# grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) +# parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) +# if parser is None: +# raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( +# grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) +# +# # create specific ConfigRule +# config_rule_class, str_config_rule_id, config_rule_data, config_rule_kind = parser(database, grpc_config_rule) +# str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) +# str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') +# result : Tuple[Union_ConfigRuleModel, bool] = update_or_create_object( +# database, config_rule_class, str_config_rule_key, config_rule_data) +# db_specific_config_rule, updated = result +# +# # create generic ConfigRule +# config_rule_fk_field_name = 'config_rule_{:s}_fk'.format(config_rule_kind.value) +# config_rule_data = { +# 'config_fk': db_config, 'kind': config_rule_kind, 'position': position, +# 'action': ORM_ConfigActionEnum.SET, +# config_rule_fk_field_name: db_specific_config_rule +# } +# result : Tuple[ConfigRuleModel, bool] = update_or_create_object( +# database, ConfigRuleModel, str_config_rule_key, config_rule_data) +# db_config_rule, updated = result +# +# return db_config_rule, updated +# +#def delete_config_rule( +# database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule +#) -> None: +# grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) +# parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) +# if parser is None: +# raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( +# grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) +# +# # delete generic config rules; self deletes specific config rule +# _, str_config_rule_id, _, config_rule_kind = parser(database, grpc_config_rule) +# str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) +# str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') +# db_config_rule : Optional[ConfigRuleModel] = get_object( +# database, ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) +# if db_config_rule is None: return +# db_config_rule.delete() +# +#def update_config( +# database : Database, db_parent_pk : str, config_name : str, grpc_config_rules +#) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: +# +# str_config_key = key_to_str([config_name, db_parent_pk], separator=':') +# result : Tuple[ConfigModel, bool] = get_or_create_object(database, ConfigModel, str_config_key) +# db_config, created = result +# +# db_objects = [(db_config, created)] +# +# for position,grpc_config_rule in enumerate(grpc_config_rules): +# action = grpc_to_enum__config_action(grpc_config_rule.action) +# +# if action == ORM_ConfigActionEnum.SET: +# result : Tuple[ConfigRuleModel, bool] = set_config_rule( +# database, db_config, grpc_config_rule, position) +# db_config_rule, updated = result +# db_objects.append((db_config_rule, updated)) +# elif action == ORM_ConfigActionEnum.DELETE: +# delete_config_rule(database, db_config, grpc_config_rule) +# else: +# msg = 'Unsupported Action({:s}) for ConfigRule({:s})' +# str_action = str(ConfigActionEnum.Name(action)) +# str_config_rule = grpc_message_to_json_string(grpc_config_rule) +# raise AttributeError(msg.format(str_action, str_config_rule)) +# +# return db_objects diff --git a/src/context/service/database/methods/Link.py b/src/context/service/database/methods/Link.py new file mode 100644 index 000000000..b98578c22 --- /dev/null +++ b/src/context/service/database/methods/Link.py @@ -0,0 +1,120 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional, Set, Tuple +from common.proto.context_pb2 import Link, LinkId, LinkIdList, LinkList +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from context.service.database.models.LinkModel import LinkModel +from context.service.database.models.RelationModels import LinkEndPointModel, TopologyLinkModel + +def link_list_ids(db_engine : Engine) -> LinkIdList: + def callback(session : Session) -> List[Dict]: + obj_list : List[LinkModel] = session.query(LinkModel).all() + #.options(selectinload(LinkModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return LinkIdList(link_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + +def link_list_objs(db_engine : Engine) -> LinkList: + def callback(session : Session) -> List[Dict]: + obj_list : List[LinkModel] = session.query(LinkModel).all() + #.options(selectinload(LinkModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return LinkList(links=run_transaction(sessionmaker(bind=db_engine), callback)) + +def link_get(db_engine : Engine, request : LinkId) -> Link: + link_uuid = request.link_uuid.uuid + def callback(session : Session) -> Optional[Dict]: + obj : Optional[LinkModel] = session.query(LinkModel)\ + .filter_by(link_uuid=link_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: raise NotFoundException('Link', link_uuid) + return Link(**obj) + +def link_set(db_engine : Engine, request : Link) -> bool: + link_uuid = request.link_id.link_uuid.uuid + link_name = request.name + + topology_keys : Set[Tuple[str, str]] = set() + related_topologies : List[Dict] = list() + link_endpoints_data : List[Dict] = list() + for endpoint_id in request.link_endpoint_ids: + context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + + link_endpoints_data.append({ + 'link_uuid' : link_uuid, + 'context_uuid' : context_uuid, + 'topology_uuid': topology_uuid, + 'device_uuid' : device_uuid, + 'endpoint_uuid': endpoint_uuid, + }) + + if len(context_uuid) > 0 and len(topology_uuid) > 0: + topology_key = (context_uuid, topology_uuid) + if topology_key not in topology_keys: + related_topologies.append({ + 'context_uuid': context_uuid, + 'topology_uuid': topology_uuid, + 'link_uuid': link_uuid, + }) + topology_keys.add(topology_key) + + def callback(session : Session) -> None: + obj : Optional[LinkModel] = session.query(LinkModel).with_for_update()\ + .filter_by(link_uuid=link_uuid).one_or_none() + is_update = obj is not None + if is_update: + obj.link_name = link_name + session.merge(obj) + else: + session.add(LinkModel(link_uuid=link_uuid, link_name=link_name, created_at=time.time())) + obj : Optional[LinkModel] = session.query(LinkModel)\ + .filter_by(link_uuid=link_uuid).one_or_none() + + stmt = insert(LinkEndPointModel).values(link_endpoints_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[ + LinkEndPointModel.link_uuid, LinkEndPointModel.context_uuid, LinkEndPointModel.topology_uuid, + LinkEndPointModel.device_uuid, LinkEndPointModel.endpoint_uuid + ], + ) + session.execute(stmt) + + session.execute(insert(TopologyLinkModel).values(related_topologies).on_conflict_do_nothing( + index_elements=[ + TopologyLinkModel.context_uuid, TopologyLinkModel.topology_uuid, + TopologyLinkModel.link_uuid + ] + )) + run_transaction(sessionmaker(bind=db_engine), callback) + return False # TODO: improve and check if created/updated + +def link_delete(db_engine : Engine, request : LinkId) -> bool: + link_uuid = request.link_uuid.uuid + def callback(session : Session) -> bool: + session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() + session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() + num_deleted = session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() + #db_link = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() + #session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/methods/Topology.py b/src/context/service/database/methods/Topology.py new file mode 100644 index 000000000..f9449e0c3 --- /dev/null +++ b/src/context/service/database/methods/Topology.py @@ -0,0 +1,123 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional, Set +from common.proto.context_pb2 import ContextId, Topology, TopologyId, TopologyIdList, TopologyList +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from context.service.database.models.RelationModels import TopologyDeviceModel +from context.service.database.models.TopologyModel import TopologyModel + +def topology_list_ids(db_engine : Engine, request : ContextId) -> TopologyIdList: + context_uuid = request.context_uuid.uuid + def callback(session : Session) -> List[Dict]: + obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return TopologyIdList(topology_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + +def topology_list_objs(db_engine : Engine, request : ContextId) -> TopologyList: + context_uuid = request.context_uuid.uuid + def callback(session : Session) -> List[Dict]: + obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return TopologyList(topologies=run_transaction(sessionmaker(bind=db_engine), callback)) + +def topology_get(db_engine : Engine, request : TopologyId) -> Topology: + context_uuid = request.context_id.context_uuid.uuid + topology_uuid = request.topology_uuid.uuid + + def callback(session : Session) -> Optional[Dict]: + obj : Optional[TopologyModel] = session.query(TopologyModel)\ + .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: + obj_uuid = '{:s}/{:s}'.format(context_uuid, topology_uuid) + raise NotFoundException('Topology', obj_uuid) + return Topology(**obj) + +def topology_set(db_engine : Engine, request : Topology) -> bool: + context_uuid = request.topology_id.context_id.context_uuid.uuid + topology_uuid = request.topology_id.topology_uuid.uuid + topology_name = request.name + + device_uuids : Set[str] = set() + devices_to_add : List[Dict] = list() + for device_id in request.device_ids: + device_uuid = device_id.device_uuid.uuid + if device_uuid in device_uuids: continue + devices_to_add.append({ + 'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'device_uuid': device_uuid + }) + device_uuids.add(device_uuid) + + link_uuids : Set[str] = set() + links_to_add : List[Dict] = list() + for link_id in request.link_ids: + link_uuid = link_id.link_uuid.uuid + if link_uuid in link_uuids: continue + links_to_add.append({ + 'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'link_uuid': link_uuid + }) + link_uuids.add(link_uuid) + + def callback(session : Session) -> None: + topology_data = [{ + 'context_uuid' : context_uuid, + 'topology_uuid': topology_uuid, + 'topology_name': topology_name, + 'created_at' : time.time(), + }] + stmt = insert(TopologyModel).values(topology_data) + stmt = stmt.on_conflict_do_update( + index_elements=[TopologyModel.context_uuid, TopologyModel.topology_uuid], + set_=dict(topology_name = stmt.excluded.topology_name) + ) + session.execute(stmt) + + if len(devices_to_add) > 0: + session.execute(insert(TopologyDeviceModel).values(devices_to_add).on_conflict_do_nothing( + index_elements=[ + TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, + TopologyDeviceModel.device_uuid + ] + )) + + #if len(link_to_add) > 0: + # session.execute(insert(TopologyLinkModel).values(links_to_add).on_conflict_do_nothing( + # index_elements=[ + # TopologyLinkModel.context_uuid, TopologyLinkModel.topology_uuid, + # TopologyLinkModel.link_uuid + # ] + # )) + + run_transaction(sessionmaker(bind=db_engine), callback) + return False # TODO: improve and check if created/updated + +def topology_delete(db_engine : Engine, request : TopologyId) -> bool: + context_uuid = request.context_id.context_uuid.uuid + topology_uuid = request.topology_uuid.uuid + + def callback(session : Session) -> bool: + num_deleted = session.query(TopologyModel)\ + .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).delete() + return num_deleted > 0 + + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/methods/__init__.py b/src/context/service/database/methods/__init__.py new file mode 100644 index 000000000..9953c8205 --- /dev/null +++ b/src/context/service/database/methods/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py new file mode 100644 index 000000000..d5a37eed2 --- /dev/null +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -0,0 +1,44 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, json +from sqlalchemy import Column, ForeignKey, INTEGER, CheckConstraint, Enum, String, text +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship +from typing import Dict +from .enums.ConfigAction import ORM_ConfigActionEnum +from ._Base import _Base + +# enum values should match name of field in ConfigRuleModel +class ConfigRuleKindEnum(enum.Enum): + CUSTOM = 'custom' + ACL = 'acl' + +class ConfigRuleModel(_Base): + __tablename__ = 'config_rule' + device_uuid = Column(UUID(as_uuid=False), ForeignKey('device.device_uuid', ondelete='CASCADE'), primary_key=True) + rule_uuid = Column(UUID(as_uuid=False), primary_key=True, server_default=text('uuid_generate_v4()')) + kind = Column(Enum(ConfigRuleKindEnum)) + action = Column(Enum(ORM_ConfigActionEnum)) + position = Column(INTEGER, nullable=False) + data = Column(String, nullable=False) + + __table_args__ = ( + CheckConstraint(position >= 0, name='check_position_value'), + ) + + device = relationship('DeviceModel', back_populates='config_rules') + + def dump(self) -> Dict: + return {self.kind.value: json.loads(self.data)} diff --git a/src/context/service/database/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py similarity index 97% rename from src/context/service/database/ConnectionModel.py rename to src/context/service/database/models/ConnectionModel.py index e780ccb68..546fb7a80 100644 --- a/src/context/service/database/ConnectionModel.py +++ b/src/context/service/database/models/ConnectionModel.py @@ -24,19 +24,21 @@ from common.orm.HighLevel import get_object, get_or_create_object, get_related_o from common.proto.context_pb2 import EndPointId from .EndPointModel import EndPointModel from .ServiceModel import ServiceModel -from .Tools import remove_dict_key +def remove_dict_key(dictionary : Dict, key : str): + dictionary.pop(key, None) + return dictionary from sqlalchemy import Column, Enum, ForeignKey, Integer, CheckConstraint from typing import Dict, List from common.orm.HighLevel import get_related_objects from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum -from .ConfigModel import ConfigModel +from .ConfigRuleModel import ConfigModel from .ConstraintModel import ConstraintsModel -from .ContextModel import ContextModel +from .models.ContextModel import ContextModel from .Tools import grpc_to_enum from sqlalchemy.dialects.postgresql import UUID -from context.service.database._Base import Base +from context.service.database.models._Base import Base import enum LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py similarity index 98% rename from src/context/service/database/ConstraintModel.py rename to src/context/service/database/models/ConstraintModel.py index 30d900300..d616c3a7f 100644 --- a/src/context/service/database/ConstraintModel.py +++ b/src/context/service/database/models/ConstraintModel.py @@ -19,14 +19,17 @@ from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import Constraint from common.tools.grpc.Tools import grpc_message_to_json_string from .EndPointModel import EndPointModel -from .Tools import fast_hasher, remove_dict_key +from .Tools import fast_hasher from sqlalchemy import Column, ForeignKey, String, Float, CheckConstraint, Integer, Boolean, Enum from sqlalchemy.dialects.postgresql import UUID -from context.service.database._Base import Base +from context.service.database.models._Base import Base import enum LOGGER = logging.getLogger(__name__) +def remove_dict_key(dictionary : Dict, key : str): + dictionary.pop(key, None) + return dictionary class ConstraintsModel(Base): # pylint: disable=abstract-method __tablename__ = 'Constraints' diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/models/ContextModel.py similarity index 86% rename from src/context/service/database/ContextModel.py rename to src/context/service/database/models/ContextModel.py index ae8cf995f..a5ddeb596 100644 --- a/src/context/service/database/ContextModel.py +++ b/src/context/service/database/models/ContextModel.py @@ -24,9 +24,9 @@ class ContextModel(_Base): context_name = Column(String(), nullable=False) created_at = Column(Float) - topology = relationship('TopologyModel', back_populates='context') - #service = relationship('ServiceModel', back_populates='context') - #slice = relationship('SliceModel', back_populates='context') + topologies = relationship('TopologyModel', back_populates='context') + #services = relationship('ServiceModel', back_populates='context') + #slices = relationship('SliceModel', back_populates='context') def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} @@ -38,7 +38,7 @@ class ContextModel(_Base): return { 'context_id' : self.dump_id(), 'name' : self.context_name, - 'topology_ids': [obj.dump_id() for obj in self.topology], - #'service_ids' : [obj.dump_id() for obj in self.service ], - #'slice_ids' : [obj.dump_id() for obj in self.slice ], + 'topology_ids': [obj.dump_id() for obj in self.topologies], + #'service_ids' : [obj.dump_id() for obj in self.services ], + #'slice_ids' : [obj.dump_id() for obj in self.slices ], } diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py new file mode 100644 index 000000000..fb5853482 --- /dev/null +++ b/src/context/service/database/models/DeviceModel.py @@ -0,0 +1,52 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import operator +from typing import Dict +from sqlalchemy import Column, Float, String, Enum +from sqlalchemy.dialects.postgresql import UUID, ARRAY +from sqlalchemy.orm import relationship +from ._Base import _Base +from .enums.DeviceDriver import ORM_DeviceDriverEnum +from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum + +class DeviceModel(_Base): + __tablename__ = 'device' + device_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_name = Column(String, nullable=False) + device_type = Column(String, nullable=False) + device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum)) + device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) + created_at = Column(Float) + + topology_devices = relationship('TopologyDeviceModel', back_populates='device') + config_rules = relationship('ConfigRuleModel', passive_deletes=True, back_populates='device', lazy='joined') + endpoints = relationship('EndPointModel', passive_deletes=True, back_populates='device', lazy='joined') + + def dump_id(self) -> Dict: + return {'device_uuid': {'uuid': self.device_uuid}} + + def dump(self) -> Dict: + return { + 'device_id' : self.dump_id(), + 'name' : self.device_name, + 'device_type' : self.device_type, + 'device_operational_status': self.device_operational_status.value, + 'device_drivers' : [driver.value for driver in self.device_drivers], + 'device_config' : {'config_rules': [ + config_rule.dump() + for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) + ]}, + 'device_endpoints' : [endpoint.dump() for endpoint in self.endpoints], + } diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/models/EndPointModel.py similarity index 82% rename from src/context/service/database/EndPointModel.py rename to src/context/service/database/models/EndPointModel.py index a8d3c2c69..b7e4c9fe3 100644 --- a/src/context/service/database/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -12,24 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import enum, functools from typing import Dict from sqlalchemy import Column, String, Enum, ForeignKeyConstraint from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship -from common.proto.kpi_sample_types_pb2 import KpiSampleType +from .enums.KpiSampleType import ORM_KpiSampleTypeEnum from ._Base import _Base -from .Tools import grpc_to_enum - -class ORM_KpiSampleTypeEnum(enum.Enum): - UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN - PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED - PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED - BYTES_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED - BYTES_RECEIVED = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED - -grpc_to_enum__kpi_sample_type = functools.partial( - grpc_to_enum, KpiSampleType, ORM_KpiSampleTypeEnum) class EndPointModel(_Base): __tablename__ = 'endpoint' @@ -51,8 +39,9 @@ class EndPointModel(_Base): ondelete='CASCADE'), ) - topology = relationship('TopologyModel', back_populates='endpoints') - device = relationship('DeviceModel', back_populates='endpoints') + topology = relationship('TopologyModel', back_populates='endpoints') + device = relationship('DeviceModel', back_populates='endpoints') + link_endpoints = relationship('LinkEndPointModel', back_populates='endpoint') def dump_id(self) -> Dict: result = { diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py new file mode 100644 index 000000000..df173f527 --- /dev/null +++ b/src/context/service/database/models/LinkModel.py @@ -0,0 +1,41 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict +from sqlalchemy import Column, Float, String +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship +from ._Base import _Base + +class LinkModel(_Base): + __tablename__ = 'link' + link_uuid = Column(UUID(as_uuid=False), primary_key=True) + link_name = Column(String, nullable=False) + created_at = Column(Float) + + topology_links = relationship('TopologyLinkModel', back_populates='link') + link_endpoints = relationship('LinkEndPointModel', back_populates='link') #, lazy='joined') + + def dump_id(self) -> Dict: + return {'link_uuid': {'uuid': self.link_uuid}} + + def dump(self) -> Dict: + return { + 'link_id' : self.dump_id(), + 'name' : self.link_name, + 'link_endpoint_ids': [ + link_endpoint.endpoint.dump_id() + for link_endpoint in self.link_endpoints + ], + } diff --git a/src/context/service/database/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py similarity index 100% rename from src/context/service/database/PolicyRuleModel.py rename to src/context/service/database/models/PolicyRuleModel.py diff --git a/src/context/service/database/RelationModels.py b/src/context/service/database/models/RelationModels.py similarity index 57% rename from src/context/service/database/RelationModels.py rename to src/context/service/database/models/RelationModels.py index bcf85d005..6cc4ff86c 100644 --- a/src/context/service/database/RelationModels.py +++ b/src/context/service/database/models/RelationModels.py @@ -16,7 +16,7 @@ import logging from sqlalchemy import Column, ForeignKey, ForeignKeyConstraint from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship -from context.service.database._Base import _Base +from context.service.database.models._Base import _Base LOGGER = logging.getLogger(__name__) @@ -24,27 +24,43 @@ LOGGER = logging.getLogger(__name__) # pk = PrimaryKeyField() # connection_fk = ForeignKeyField(ConnectionModel) # sub_service_fk = ForeignKeyField(ServiceModel) -# -#class LinkEndPointModel(Base): -# __tablename__ = 'LinkEndPoint' -# # uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) + + # link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) # endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"), primary_key=True) -# -# @staticmethod -# def main_pk_name(): -# return 'endpoint_uuid' -# + +class LinkEndPointModel(_Base): + __tablename__ = 'link_endpoint' + link_uuid = Column(UUID(as_uuid=False), primary_key=True) + context_uuid = Column(UUID(as_uuid=False), primary_key=True) + topology_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), primary_key=True) + endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) + + link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', back_populates='link_endpoints', lazy='joined') + + __table_args__ = ( + ForeignKeyConstraint( + ['link_uuid'], + ['link.link_uuid'], + ondelete='CASCADE'), + ForeignKeyConstraint( + ['context_uuid', 'topology_uuid', 'device_uuid', 'endpoint_uuid'], + ['endpoint.context_uuid', 'endpoint.topology_uuid', 'endpoint.device_uuid', 'endpoint.endpoint_uuid'], + ondelete='CASCADE'), + ) + # class ServiceEndPointModel(Model): # pk = PrimaryKeyField() # service_fk = ForeignKeyField(ServiceModel) # endpoint_fk = ForeignKeyField(EndPointModel) -# + # class SliceEndPointModel(Model): # pk = PrimaryKeyField() # slice_fk = ForeignKeyField(SliceModel) # endpoint_fk = ForeignKeyField(EndPointModel) -# + # class SliceServiceModel(Model): # pk = PrimaryKeyField() # slice_fk = ForeignKeyField(SliceModel) @@ -54,7 +70,7 @@ LOGGER = logging.getLogger(__name__) # link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) # endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) #del) -# + # class SliceSubSliceModel(Model): # pk = PrimaryKeyField() # slice_fk = ForeignKeyField(SliceModel) @@ -66,8 +82,8 @@ class TopologyDeviceModel(_Base): topology_uuid = Column(UUID(as_uuid=False), primary_key=True) device_uuid = Column(UUID(as_uuid=False), primary_key=True) - topologies = relationship('TopologyModel', back_populates='topology_device') - devices = relationship('DeviceModel', back_populates='topology_device') + topology = relationship('TopologyModel', back_populates='topology_devices', lazy='joined') + device = relationship('DeviceModel', back_populates='topology_devices', lazy='joined') __table_args__ = ( ForeignKeyConstraint( @@ -80,7 +96,22 @@ class TopologyDeviceModel(_Base): ondelete='CASCADE'), ) -#class TopologyLinkModel(Base): -# __tablename__ = 'TopologyLink' -# topology_uuid = Column(UUID(as_uuid=False), ForeignKey("Topology.topology_uuid")) -# link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid"), primary_key=True) +class TopologyLinkModel(_Base): + __tablename__ = 'topology_link' + context_uuid = Column(UUID(as_uuid=False), primary_key=True) + topology_uuid = Column(UUID(as_uuid=False), primary_key=True) + link_uuid = Column(UUID(as_uuid=False), primary_key=True) + + topology = relationship('TopologyModel', back_populates='topology_links', lazy='joined') + link = relationship('LinkModel', back_populates='topology_links', lazy='joined') + + __table_args__ = ( + ForeignKeyConstraint( + ['context_uuid', 'topology_uuid'], + ['topology.context_uuid', 'topology.topology_uuid'], + ondelete='CASCADE'), + ForeignKeyConstraint( + ['link_uuid'], + ['link.link_uuid'], + ondelete='CASCADE'), + ) diff --git a/src/context/service/database/ServiceModel.py b/src/context/service/database/models/ServiceModel.py similarity index 97% rename from src/context/service/database/ServiceModel.py rename to src/context/service/database/models/ServiceModel.py index 20e10ddd5..c06baca32 100644 --- a/src/context/service/database/ServiceModel.py +++ b/src/context/service/database/models/ServiceModel.py @@ -17,12 +17,12 @@ from sqlalchemy import Column, Enum, ForeignKey from typing import Dict, List from common.orm.HighLevel import get_related_objects from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum -from .ConfigModel import ConfigModel +from .ConfigRuleModel import ConfigModel from .ConstraintModel import ConstraintsModel -from .ContextModel import ContextModel +from .models.ContextModel import ContextModel from .Tools import grpc_to_enum from sqlalchemy.dialects.postgresql import UUID -from context.service.database._Base import Base +from context.service.database.models._Base import Base import enum LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/SliceModel.py b/src/context/service/database/models/SliceModel.py similarity index 98% rename from src/context/service/database/SliceModel.py rename to src/context/service/database/models/SliceModel.py index 74bb60b40..2b03e6122 100644 --- a/src/context/service/database/SliceModel.py +++ b/src/context/service/database/models/SliceModel.py @@ -22,9 +22,9 @@ from common.orm.fields.StringField import StringField from common.orm.model.Model import Model from common.orm.HighLevel import get_related_objects from common.proto.context_pb2 import SliceStatusEnum -from .ConfigModel import ConfigModel +from .ConfigRuleModel import ConfigModel from .ConstraintModel import ConstraintsModel -from .ContextModel import ContextModel +from .models.ContextModel import ContextModel from .Tools import grpc_to_enum LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/models/TopologyModel.py similarity index 77% rename from src/context/service/database/TopologyModel.py rename to src/context/service/database/models/TopologyModel.py index 57fe1b347..95f7a6350 100644 --- a/src/context/service/database/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -26,10 +26,10 @@ class TopologyModel(_Base): created_at = Column(Float) # Relationships - context = relationship('ContextModel', back_populates='topology') - topology_device = relationship('TopologyDeviceModel', back_populates='topologies') - #topology_link = relationship('TopologyLinkModel', back_populates='topology') - endpoints = relationship('EndPointModel', back_populates='topology') + context = relationship('ContextModel', back_populates='topologies') + topology_devices = relationship('TopologyDeviceModel', back_populates='topology') + topology_links = relationship('TopologyLinkModel', back_populates='topology') + endpoints = relationship('EndPointModel', back_populates='topology') def dump_id(self) -> Dict: return { @@ -41,6 +41,6 @@ class TopologyModel(_Base): return { 'topology_id': self.dump_id(), 'name' : self.topology_name, - 'device_ids' : [{'device_uuid': {'uuid': td.device_uuid}} for td in self.topology_device], - #'link_ids' : [{'link_uuid' : {'uuid': td.link_uuid }} for td in self.topology_link ], + 'device_ids' : [{'device_uuid': {'uuid': td.device_uuid}} for td in self.topology_devices], + 'link_ids' : [{'link_uuid' : {'uuid': td.link_uuid }} for td in self.topology_links ], } diff --git a/src/context/service/database/_Base.py b/src/context/service/database/models/_Base.py similarity index 100% rename from src/context/service/database/_Base.py rename to src/context/service/database/models/_Base.py diff --git a/src/context/service/database/models/__init__.py b/src/context/service/database/models/__init__.py new file mode 100644 index 000000000..9953c8205 --- /dev/null +++ b/src/context/service/database/models/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/context/service/database/models/enums/ConfigAction.py b/src/context/service/database/models/enums/ConfigAction.py new file mode 100644 index 000000000..6bbcdea99 --- /dev/null +++ b/src/context/service/database/models/enums/ConfigAction.py @@ -0,0 +1,25 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.context_pb2 import ConfigActionEnum +from ._GrpcToEnum import grpc_to_enum + +class ORM_ConfigActionEnum(enum.Enum): + UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED + SET = ConfigActionEnum.CONFIGACTION_SET + DELETE = ConfigActionEnum.CONFIGACTION_DELETE + +grpc_to_enum__config_action = functools.partial( + grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum) diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py new file mode 100644 index 000000000..21338ddb8 --- /dev/null +++ b/src/context/service/database/models/enums/DeviceDriver.py @@ -0,0 +1,29 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.context_pb2 import DeviceDriverEnum +from ._GrpcToEnum import grpc_to_enum + +class ORM_DeviceDriverEnum(enum.Enum): + UNDEFINED = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED + OPENCONFIG = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG + TRANSPORT_API = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API + P4 = DeviceDriverEnum.DEVICEDRIVER_P4 + IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY + ONF_TR_352 = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352 + XR = DeviceDriverEnum.DEVICEDRIVER_XR + +grpc_to_enum__device_driver = functools.partial( + grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) diff --git a/src/context/service/database/models/enums/DeviceOperationalStatus.py b/src/context/service/database/models/enums/DeviceOperationalStatus.py new file mode 100644 index 000000000..2bfe60779 --- /dev/null +++ b/src/context/service/database/models/enums/DeviceOperationalStatus.py @@ -0,0 +1,25 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.context_pb2 import DeviceOperationalStatusEnum +from ._GrpcToEnum import grpc_to_enum + +class ORM_DeviceOperationalStatusEnum(enum.Enum): + UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED + DISABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + +grpc_to_enum__device_operational_status = functools.partial( + grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum) diff --git a/src/context/service/database/models/enums/KpiSampleType.py b/src/context/service/database/models/enums/KpiSampleType.py new file mode 100644 index 000000000..4126e90b2 --- /dev/null +++ b/src/context/service/database/models/enums/KpiSampleType.py @@ -0,0 +1,27 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from ._GrpcToEnum import grpc_to_enum + +class ORM_KpiSampleTypeEnum(enum.Enum): + UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN + PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED + PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED + BYTES_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED + BYTES_RECEIVED = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED + +grpc_to_enum__kpi_sample_type = functools.partial( + grpc_to_enum, KpiSampleType, ORM_KpiSampleTypeEnum) diff --git a/src/context/service/database/models/enums/_GrpcToEnum.py b/src/context/service/database/models/enums/_GrpcToEnum.py new file mode 100644 index 000000000..df70399f9 --- /dev/null +++ b/src/context/service/database/models/enums/_GrpcToEnum.py @@ -0,0 +1,32 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from enum import Enum + +# Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatical method to retrieve +# the values it expects from strings containing the desired value symbol or its integer value, so a kind of mapping is +# required. Besides, ORM Models expect Enum classes in EnumeratedFields; we create specific and conveniently defined +# Enum classes to serve both purposes. + +def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value): + grpc_enum_name = grpc_enum_class.Name(grpc_enum_value) + grpc_enum_prefix = orm_enum_class.__name__.upper() + #grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix) + #grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix) + #grpc_enum_prefix = grpc_enum_prefix + '_' + grpc_enum_prefix = re.sub(r'^ORM_(.+)ENUM$', r'\1_', grpc_enum_prefix) + orm_enum_name = grpc_enum_name.replace(grpc_enum_prefix, '') + orm_enum_value = orm_enum_class._member_map_.get(orm_enum_name) + return orm_enum_value diff --git a/src/context/service/database/models/enums/__init__.py b/src/context/service/database/models/enums/__init__.py new file mode 100644 index 000000000..9953c8205 --- /dev/null +++ b/src/context/service/database/models/enums/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/context/service/database/Tools.py b/src/context/service/database/tools/FastHasher.py similarity index 63% rename from src/context/service/database/Tools.py rename to src/context/service/database/tools/FastHasher.py index 44a5aa264..6632a1c79 100644 --- a/src/context/service/database/Tools.py +++ b/src/context/service/database/tools/FastHasher.py @@ -12,31 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import hashlib, re -from enum import Enum -from typing import Dict, List, Tuple, Union -import logging -# Convenient helper function to remove dictionary items in dict/list/set comprehensions. -LOGGER = logging.getLogger(__name__) - -def remove_dict_key(dictionary : Dict, key : str): - dictionary.pop(key, None) - return dictionary - -# Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatical method to retrieve -# the values it expects from strings containing the desired value symbol or its integer value, so a kind of mapping is -# required. Besides, ORM Models expect Enum classes in EnumeratedFields; we create specific and conveniently defined -# Enum classes to serve both purposes. - -def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value): - grpc_enum_name = grpc_enum_class.Name(grpc_enum_value) - grpc_enum_prefix = orm_enum_class.__name__.upper() - grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix) - grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix) - grpc_enum_prefix = grpc_enum_prefix + '_' - orm_enum_name = grpc_enum_name.replace(grpc_enum_prefix, '') - orm_enum_value = orm_enum_class._member_map_.get(orm_enum_name) # pylint: disable=protected-access - return orm_enum_value +import hashlib +from typing import List, Tuple, Union # For some models, it is convenient to produce a string hash for fast comparisons of existence or modification. Method # fast_hasher computes configurable length (between 1 and 64 byte) hashes and retrieves them in hex representation. diff --git a/src/context/service/database/tools/__init__.py b/src/context/service/database/tools/__init__.py new file mode 100644 index 000000000..9953c8205 --- /dev/null +++ b/src/context/service/database/tools/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/context/tests/_test_connection.py b/src/context/tests/_test_connection.py new file mode 100644 index 000000000..b6060df68 --- /dev/null +++ b/src/context/tests/_test_connection.py @@ -0,0 +1,280 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from typing import Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.proto.context_pb2 import ( + Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, + EventTypeEnum, Service, ServiceEvent, ServiceId, Topology, TopologyEvent, TopologyId) +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from .Objects import ( + CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, + DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, SERVICE_R1_R2, + SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, + SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID) + +def grpc_connection( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_db_mb[0] + + database = Database(Session) + + # ----- Clean the database ----------------------------------------------------------------------------------------- + database.clear() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) + assert response.device_uuid.uuid == DEVICE_R2_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R3)) + assert response.device_uuid.uuid == DEVICE_R3_UUID + + response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetService(Service(**SERVICE_R2_R3)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R2_R3_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R2_R3_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetService(Service(**SERVICE_R1_R3)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R3_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R3_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + events = events_collector.get_events(block=True, count=11) + + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + + assert isinstance(events[3], DeviceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + + assert isinstance(events[4], DeviceEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[4].device_id.device_uuid.uuid == DEVICE_R3_UUID + + assert isinstance(events[5], ServiceEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[5].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[5].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[6], ContextEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[6].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + assert isinstance(events[7], ServiceEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[7].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[7].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID + + assert isinstance(events[8], ContextEvent) + assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + assert isinstance(events[9], ServiceEvent) + assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[9].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[9].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID + + assert isinstance(events[10], ContextEvent) + assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[10].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Connection({:s}) not found'.format(CONNECTION_R1_R3_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 0 + + response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 187 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3) + WRONG_CONNECTION['path_hops_endpoint_ids'][0]\ + ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' + context_client_grpc.SetConnection(Connection(**WRONG_CONNECTION)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + # TODO: should we check that all endpoints belong to same topology? + # TODO: should we check that endpoints form links over the topology? + msg = 'EndPoint({:s}/{:s}:wrong-context-uuid/{:s}) not found'.format( + DEVICE_R1_UUID, WRONG_CONNECTION['path_hops_endpoint_ids'][0]['endpoint_uuid']['uuid'], DEFAULT_TOPOLOGY_UUID) + assert e.value.details() == msg + + response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) + assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ConnectionEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) + assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ConnectionEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 203 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + assert response.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_id.service_uuid.uuid == SERVICE_R1_R3_UUID + assert len(response.path_hops_endpoint_ids) == 6 + assert len(response.sub_service_ids) == 2 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 1 + assert response.connection_ids[0].connection_uuid.uuid == CONNECTION_R1_R3_UUID + + response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 1 + assert response.connections[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + assert len(response.connections[0].path_hops_endpoint_ids) == 6 + assert len(response.connections[0].sub_service_ids) == 2 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R3_ID)) + context_client_grpc.RemoveService(ServiceId(**SERVICE_R2_R3_ID)) + context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R3_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + events = events_collector.get_events(block=True, count=9) + + assert isinstance(events[0], ConnectionEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID + + assert isinstance(events[1], ServiceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[1].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID + + assert isinstance(events[2], ServiceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[2].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID + + assert isinstance(events[3], ServiceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[3].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[4], DeviceEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[4].device_id.device_uuid.uuid == DEVICE_R1_UUID + + assert isinstance(events[5], DeviceEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[5].device_id.device_uuid.uuid == DEVICE_R2_UUID + + assert isinstance(events[6], DeviceEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[6].device_id.device_uuid.uuid == DEVICE_R3_UUID + + assert isinstance(events[7], TopologyEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[7].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[7].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + assert isinstance(events[8], ContextEvent) + assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 diff --git a/src/context/tests/_test_context.py b/src/context/tests/_test_context.py new file mode 100644 index 000000000..ef67d39d7 --- /dev/null +++ b/src/context/tests/_test_context.py @@ -0,0 +1,160 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest, uuid +from common.Constants import DEFAULT_CONTEXT_UUID +from common.proto.context_pb2 import Context, ContextId, Empty +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Service import json_service_id +from common.tools.object_factory.Slice import json_slice_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector +from .Objects import CONTEXT, CONTEXT_ID + +def grpc_context(context_client_grpc : ContextClient) -> None: + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Context({:s}) not found'.format(DEFAULT_CONTEXT_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 0 + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + wrong_context_uuid = str(uuid.uuid4()) + wrong_context_id = json_context_id(wrong_context_uuid) + with pytest.raises(grpc.RpcError) as e: + WRONG_CONTEXT = copy.deepcopy(CONTEXT) + WRONG_CONTEXT['topology_ids'].append(json_topology_id(str(uuid.uuid4()), context_id=wrong_context_id)) + context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + with pytest.raises(grpc.RpcError) as e: + WRONG_CONTEXT = copy.deepcopy(CONTEXT) + WRONG_CONTEXT['service_ids'].append(json_service_id(str(uuid.uuid4()), context_id=wrong_context_id)) + context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + with pytest.raises(grpc.RpcError) as e: + WRONG_CONTEXT = copy.deepcopy(CONTEXT) + WRONG_CONTEXT['slice_ids'].append(json_slice_id(str(uuid.uuid4()), context_id=wrong_context_id)) + context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.slice_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + # ----- Check create event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True, timeout=10.0) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.name == '' + assert len(response.topology_ids) == 0 + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 1 + assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 1 + assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.contexts[0].name == '' + assert len(response.contexts[0].topology_ids) == 0 + assert len(response.contexts[0].service_ids) == 0 + assert len(response.contexts[0].slice_ids) == 0 + + # ----- Update the object ------------------------------------------------------------------------------------------ + new_context_name = 'new' + CONTEXT_WITH_NAME = copy.deepcopy(CONTEXT) + CONTEXT_WITH_NAME['name'] = new_context_name + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_NAME)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True, timeout=10.0) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object is modified ---------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.name == new_context_name + assert len(response.topology_ids) == 0 + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # ----- List when the object is modified --------------------------------------------------------------------------- + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 1 + assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 1 + assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.contexts[0].name == new_context_name + assert len(response.contexts[0].topology_ids) == 0 + assert len(response.contexts[0].service_ids) == 0 + assert len(response.contexts[0].slice_ids) == 0 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True, timeout=10.0) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client_grpc.ListContextIds(Empty()) + assert len(response.context_ids) == 0 + + response = context_client_grpc.ListContexts(Empty()) + assert len(response.contexts) == 0 + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + #events_collector.stop() diff --git a/src/context/tests/_test_device.py b/src/context/tests/_test_device.py new file mode 100644 index 000000000..20760a961 --- /dev/null +++ b/src/context/tests/_test_device.py @@ -0,0 +1,199 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.proto.context_pb2 import ( + Context, ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty, Topology, TopologyId) +from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector +from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, TOPOLOGY, TOPOLOGY_ID + +def grpc_device(context_client_grpc : ContextClient) -> None: + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = True, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + #events = events_collector.get_events(block=True, count=2) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Device({:s}) not found'.format(DEVICE_R1_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListDeviceIds(Empty()) + assert len(response.device_ids) == 0 + + response = context_client_grpc.ListDevices(Empty()) + assert len(response.devices) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_DEVICE = copy.deepcopy(DEVICE_R1) + WRONG_DEVICE_UUID = '3f03c76d-31fb-47f5-9c1d-bc6b6bfa2d08' + WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID + context_client_grpc.SetDevice(Device(**WRONG_DEVICE)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ + 'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID) + assert e.value.details() == msg + + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, DeviceEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.name == '' + assert response.device_type == 'packet-router' + assert len(response.device_config.config_rules) == 3 + assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + assert len(response.device_drivers) == 1 + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers + assert len(response.device_endpoints) == 3 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListDeviceIds(Empty()) + assert len(response.device_ids) == 1 + assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.ListDevices(Empty()) + assert len(response.devices) == 1 + assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.devices[0].name == '' + assert response.devices[0].device_type == 'packet-router' + assert len(response.devices[0].device_config.config_rules) == 3 + assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + assert len(response.devices[0].device_drivers) == 1 + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers + assert len(response.devices[0].device_endpoints) == 3 + + # ----- Update the object ------------------------------------------------------------------------------------------ + new_device_name = 'r1' + new_device_driver = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED + DEVICE_UPDATED = copy.deepcopy(DEVICE_R1) + DEVICE_UPDATED['name'] = new_device_name + DEVICE_UPDATED['device_operational_status'] = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + DEVICE_UPDATED['device_drivers'].append(new_device_driver) + response = context_client_grpc.SetDevice(Device(**DEVICE_UPDATED)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, DeviceEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + + # ----- Get when the object is modified ---------------------------------------------------------------------------- + response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.name == new_device_name + assert response.device_type == 'packet-router' + assert len(response.device_config.config_rules) == 3 + assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + assert len(response.device_drivers) == 2 + assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.device_drivers + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers + assert len(response.device_endpoints) == 3 + + # ----- List when the object is modified --------------------------------------------------------------------------- + response = context_client_grpc.ListDeviceIds(Empty()) + assert len(response.device_ids) == 1 + assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.ListDevices(Empty()) + assert len(response.devices) == 1 + assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.devices[0].name == new_device_name + assert response.devices[0].device_type == 'packet-router' + assert len(response.devices[0].device_config.config_rules) == 3 + assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + assert len(response.devices[0].device_drivers) == 2 + assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.devices[0].device_drivers + assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers + assert len(response.devices[0].device_endpoints) == 3 + + # ----- Create object relation ------------------------------------------------------------------------------------- + TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) + TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID) + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check relation was created --------------------------------------------------------------------------------- + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.device_ids) == 1 + assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + assert len(response.link_ids) == 0 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=3) + + # assert isinstance(events[0], DeviceEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # assert isinstance(events[2], ContextEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + #events_collector.stop() diff --git a/src/context/tests/_test_link.py b/src/context/tests/_test_link.py new file mode 100644 index 000000000..d493f23d7 --- /dev/null +++ b/src/context/tests/_test_link.py @@ -0,0 +1,189 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId +from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector +from .Objects import ( + CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, LINK_R1_R2, + LINK_R1_R2_ID, LINK_R1_R2_UUID, TOPOLOGY, TOPOLOGY_ID) + +def grpc_link(context_client_grpc: ContextClient) -> None: + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, + # activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) + assert response.device_uuid.uuid == DEVICE_R2_UUID + + # events = events_collector.get_events(block=True, count=4) + # assert isinstance(events[0], ContextEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # assert isinstance(events[3], DeviceEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Link({:s}) not found'.format(LINK_R1_R2_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListLinkIds(Empty()) + assert len(response.link_ids) == 0 + + response = context_client_grpc.ListLinks(Empty()) + assert len(response.links) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) + assert response.link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, LinkEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) + assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID + assert response.name == '' + assert len(response.link_endpoint_ids) == 2 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListLinkIds(Empty()) + assert len(response.link_ids) == 1 + assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + + response = context_client_grpc.ListLinks(Empty()) + assert len(response.links) == 1 + assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + assert response.links[0].name == '' + assert len(response.links[0].link_endpoint_ids) == 2 + + # ----- Update the object ------------------------------------------------------------------------------------------ + new_link_name = 'l1' + LINK_UPDATED = copy.deepcopy(LINK_R1_R2) + LINK_UPDATED['name'] = new_link_name + response = context_client_grpc.SetLink(Link(**LINK_UPDATED)) + assert response.link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, LinkEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Get when the object is modified ---------------------------------------------------------------------------- + response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) + assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID + assert response.name == new_link_name + assert len(response.link_endpoint_ids) == 2 + + # ----- List when the object is modified --------------------------------------------------------------------------- + response = context_client_grpc.ListLinkIds(Empty()) + assert len(response.link_ids) == 1 + assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + + response = context_client_grpc.ListLinks(Empty()) + assert len(response.links) == 1 + assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + assert response.links[0].name == new_link_name + assert len(response.links[0].link_endpoint_ids) == 2 + + # ----- Create object relation ------------------------------------------------------------------------------------- + TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY) + TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID) + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_LINK)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + # event = events_collector.get_event(block=True) + # assert isinstance(event, TopologyEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check relation was created --------------------------------------------------------------------------------- + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.device_ids) == 2 + assert response.device_ids[0].device_uuid.uuid in {DEVICE_R1_UUID, DEVICE_R2_UUID} + assert response.device_ids[1].device_uuid.uuid in {DEVICE_R1_UUID, DEVICE_R2_UUID} + assert len(response.link_ids) == 1 + assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + + # ----- Remove the object ------------------------------------------------------------------------------------------ + #context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) + #context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + #context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + #context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + #context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=5) + # + # assert isinstance(events[0], LinkEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + # + # assert isinstance(events[1], DeviceEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID + # + # assert isinstance(events[3], TopologyEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[4], ContextEvent) + # assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + #events_collector.stop() diff --git a/src/context/tests/_test_policy.py b/src/context/tests/_test_policy.py new file mode 100644 index 000000000..e416575f7 --- /dev/null +++ b/src/context/tests/_test_policy.py @@ -0,0 +1,114 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, pytest +from typing import Tuple +from common.proto.context_pb2 import Empty +from common.proto.policy_pb2 import PolicyRuleId, PolicyRule +from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector +from .Objects import POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID + +def grpc_policy( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + context_database = context_db_mb[0] + + # ----- Clean the database ----------------------------------------------------------------------------------------- + context_database.clear_all() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector(context_client_grpc) + #events_collector.start() + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + POLICY_ID = 'no-uuid' + DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}} + + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID)) + + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 0 + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=1) + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 2 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 1 + assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 1 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=2) + + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 diff --git a/src/context/tests/_test_service.py b/src/context/tests/_test_service.py new file mode 100644 index 000000000..88ece2ba9 --- /dev/null +++ b/src/context/tests/_test_service.py @@ -0,0 +1,214 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from typing import Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.proto.context_pb2 import ( + Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Service, ServiceEvent, ServiceId, + ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from .Objects import ( + CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, + SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, TOPOLOGY, TOPOLOGY_ID) + +def grpc_service( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + Session = context_db_mb[0] + # ----- Clean the database ----------------------------------------------------------------------------------------- + database = Database(Session) + database.clear() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + events_collector = EventsCollector(context_client_grpc) + events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == DEVICE_R1_UUID + + response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) + assert response.device_uuid.uuid == DEVICE_R2_UUID + # events = events_collector.get_events(block=True, count=4) + # + # assert isinstance(events[0], ContextEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # + # assert isinstance(events[1], TopologyEvent) + # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # + # assert isinstance(events[2], DeviceEvent) + # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # + # assert isinstance(events[3], DeviceEvent) + # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + LOGGER.info('----------------') + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID) + LOGGER.info('----------------') + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + assert len(response.service_ids) == 0 + LOGGER.info('----------------') + + response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 + LOGGER.info('----------------') + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = database.dump_all() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(db_entry) + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 80 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2) + WRONG_SERVICE['service_endpoint_ids'][0]\ + ['topology_id']['context_id']['context_uuid']['uuid'] = 'ca1ea172-728f-441d-972c-feeae8c9bffc' + context_client_grpc.SetService(Service(**WRONG_SERVICE)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(ca1ea172-728f-441d-972c-feeae8c9bffc) is invalid; '\ + 'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID) + assert e.value.details() == msg + + response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + + CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) + CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) + response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + events = events_collector.get_events(block=True, count=2) + + assert isinstance(events[0], ServiceEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[1], ContextEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + event = events_collector.get_event(block=True) + assert isinstance(event, ServiceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 108 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) + assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM + assert len(response.service_endpoint_ids) == 2 + assert len(response.service_constraints) == 2 + assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert len(response.service_config.config_rules) == 3 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + assert len(response.service_ids) == 1 + assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID + + response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 1 + assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM + assert len(response.services[0].service_endpoint_ids) == 2 + assert len(response.services[0].service_constraints) == 2 + assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert len(response.services[0].service_config.config_rules) == 3 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + events = events_collector.get_events(block=True, count=5) + + assert isinstance(events[0], ServiceEvent) + assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + assert isinstance(events[1], DeviceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID + + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID + + assert isinstance(events[3], TopologyEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + assert isinstance(events[4], ContextEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 diff --git a/src/context/tests/_test_slice.py b/src/context/tests/_test_slice.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/context/tests/_test_topology.py b/src/context/tests/_test_topology.py new file mode 100644 index 000000000..9774d972f --- /dev/null +++ b/src/context/tests/_test_topology.py @@ -0,0 +1,166 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.proto.context_pb2 import Context, ContextId, Topology, TopologyId +from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector +from .Objects import CONTEXT, CONTEXT_ID, TOPOLOGY, TOPOLOGY_ID + +def grpc_topology(context_client_grpc : ContextClient) -> None: + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = True, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client_grpc.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # event = events_collector.get_event(block=True) + # assert isinstance(event, ContextEvent) + # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 0 + + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + #CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT) + #CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID) + #response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY)) + #assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + #events = events_collector.get_events(block=True, count=2) + #assert isinstance(events[0], TopologyEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert isinstance(events[1], ContextEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.name == '' + assert len(response.topology_ids) == 1 + assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.name == '' + assert len(response.device_ids) == 0 + assert len(response.link_ids) == 0 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 1 + assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topologies[0].name == '' + assert len(response.topologies[0].device_ids) == 0 + assert len(response.topologies[0].link_ids) == 0 + + # ----- Update the object ------------------------------------------------------------------------------------------ + new_topology_name = 'new' + TOPOLOGY_WITH_NAME = copy.deepcopy(TOPOLOGY) + TOPOLOGY_WITH_NAME['name'] = new_topology_name + response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_NAME)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Check update event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- Get when the object is modified ---------------------------------------------------------------------------- + response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.name == new_topology_name + assert len(response.device_ids) == 0 + assert len(response.link_ids) == 0 + + # ----- List when the object is modified --------------------------------------------------------------------------- + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 1 + assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topologies[0].name == new_topology_name + assert len(response.topologies[0].device_ids) == 0 + assert len(response.topologies[0].link_ids) == 0 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 0 + + response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == 0 + + # ----- Clean dependencies used in the test and capture related events --------------------------------------------- + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + #event = events_collector.get_event(block=True) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + #events_collector.stop() diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py new file mode 100644 index 000000000..cf56ed9af --- /dev/null +++ b/src/context/tests/conftest.py @@ -0,0 +1,153 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, os, pytest, sqlalchemy +from _pytest.config import Config +from _pytest.terminal import TerminalReporter +from prettytable import PrettyTable +from typing import Any, Dict, List, Tuple +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, + get_service_port_grpc, get_service_port_http) +from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum +from common.message_broker.MessageBroker import MessageBroker +from context.client.ContextClient import ContextClient +from context.service.ContextService import ContextService +from context.service.Database import Database +from context.service.Engine import Engine +from context.service.database.models._Base import rebuild_database +#from context.service._old_code.Populate import populate +#from context.service.rest_server.RestServer import RestServer +#from context.service.rest_server.Resources import RESOURCES + + +LOCAL_HOST = '127.0.0.1' +GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports +HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT)) # avoid privileged ports + +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT) + +#DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST +#DEFAULT_REDIS_SERVICE_PORT = 6379 +#DEFAULT_REDIS_DATABASE_ID = 0 + +#REDIS_CONFIG = { +# 'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST), +# 'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT), +# 'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID', DEFAULT_REDIS_DATABASE_ID ), +#} + +#SCENARIOS = [ +# ('db:cockroach_mb:inmemory', None, {}, None, {}), +# ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) +# ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), +#] + +#@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) +@pytest.fixture(scope='session') +def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]: # pylint: disable=unused-argument + #name,db_session,mb_backend,mb_settings = request.param + #msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' + #LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) + + _db_engine = Engine.get_engine() + Engine.drop_database(_db_engine) + Engine.create_database(_db_engine) + rebuild_database(_db_engine) + + _msg_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) + yield _db_engine, _msg_broker + _msg_broker.terminate() + +RAW_METRICS = None + +@pytest.fixture(scope='session') +def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + global RAW_METRICS # pylint: disable=global-statement + _service = ContextService(context_db_mb[0], context_db_mb[1]) + RAW_METRICS = _service.context_servicer._get_metrics() + _service.start() + yield _service + _service.stop() + +#@pytest.fixture(scope='session') +#def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name +# database = context_db_mb[0] +# _rest_server = RestServer() +# for endpoint_name, resource_class, resource_url in RESOURCES: +# _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) +# _rest_server.start() +# time.sleep(1) # bring time for the server to start +# yield _rest_server +# _rest_server.shutdown() +# _rest_server.join() + +@pytest.fixture(scope='session') +def context_client_grpc( + context_service_grpc : ContextService # pylint: disable=redefined-outer-name,unused-argument +): + _client = ContextClient() + yield _client + _client.close() + +@pytest.hookimpl(hookwrapper=True) +def pytest_terminal_summary( + terminalreporter : TerminalReporter, exitstatus : int, config : Config # pylint: disable=unused-argument +): + yield + + method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]]= dict() + for raw_metric_name,raw_metric_data in RAW_METRICS.items(): + if '_COUNTER_' in raw_metric_name: + method_name,metric_name = raw_metric_name.split('_COUNTER_') + elif '_HISTOGRAM_' in raw_metric_name: + method_name,metric_name = raw_metric_name.split('_HISTOGRAM_') + else: + raise Exception('Unsupported metric: {:s}'.format(raw_metric_name)) + metric_data = method_to_metric_fields.setdefault(method_name, dict()).setdefault(metric_name, dict()) + for field_name,labels,value,_,_ in raw_metric_data._child_samples(): + if len(labels) > 0: field_name = '{:s}:{:s}'.format(field_name, json.dumps(labels, sort_keys=True)) + metric_data[field_name] = value + #print('method_to_metric_fields', method_to_metric_fields) + + def sort_stats_key(item : List) -> float: + str_duration = str(item[0]) + if str_duration == '---': return 0.0 + return float(str_duration.replace(' ms', '')) + + field_names = ['Method', 'Started', 'Completed', 'Failed', 'avg(Duration)'] + pt_stats = PrettyTable(field_names=field_names, sortby='avg(Duration)', sort_key=sort_stats_key, reversesort=True) + for f in ['Method']: pt_stats.align[f] = 'l' + for f in ['Started', 'Completed', 'Failed', 'avg(Duration)']: pt_stats.align[f] = 'r' + + for method_name,metrics in method_to_metric_fields.items(): + counter_started_value = int(metrics['STARTED']['_total']) + if counter_started_value == 0: + #pt_stats.add_row([method_name, '---', '---', '---', '---']) + continue + counter_completed_value = int(metrics['COMPLETED']['_total']) + counter_failed_value = int(metrics['FAILED']['_total']) + duration_count_value = float(metrics['DURATION']['_count']) + duration_sum_value = float(metrics['DURATION']['_sum']) + duration_avg_value = duration_sum_value/duration_count_value + pt_stats.add_row([ + method_name, str(counter_started_value), str(counter_completed_value), str(counter_failed_value), + '{:.3f} ms'.format(1000.0 * duration_avg_value), + ]) + print('') + print('Performance Results:') + print(pt_stats.get_string()) diff --git a/src/context/tests/test_hasher.py b/src/context/tests/test_hasher.py new file mode 100644 index 000000000..f9a52f5d0 --- /dev/null +++ b/src/context/tests/test_hasher.py @@ -0,0 +1,47 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, pytest +from context.service.database.tools.FastHasher import ( + FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher) + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +# ----- Test misc. Context internal tools ------------------------------------------------------------------------------ + +def test_tools_fast_string_hasher(): + with pytest.raises(TypeError) as e: + fast_hasher(27) + assert str(e.value) == "data(27) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'int'>" + + with pytest.raises(TypeError) as e: + fast_hasher({27}) + assert str(e.value) == "data({27}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" + + with pytest.raises(TypeError) as e: + fast_hasher({'27'}) + assert str(e.value) == "data({'27'}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" + + with pytest.raises(TypeError) as e: + fast_hasher([27]) + assert str(e.value) == "data[0](27) must be " + FASTHASHER_ITEM_ACCEPTED_FORMAT + ", found <class 'int'>" + + fast_hasher('hello-world') + fast_hasher('hello-world'.encode('UTF-8')) + fast_hasher(['hello', 'world']) + fast_hasher(('hello', 'world')) + fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')]) + fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8'))) diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index c85042d2c..6845036bd 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -12,1348 +12,44 @@ # See the License for the specific language governing permissions and # limitations under the License. -# pylint: disable=too-many-lines -import copy, grpc, logging, os, pytest, requests, sqlalchemy, time, urllib, uuid -from typing import Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum -from common.Settings import ( - ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, - get_service_baseurl_http, get_service_port_grpc, get_service_port_http) -from context.service.Database import Database -from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum -from common.message_broker.MessageBroker import MessageBroker -from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceDriverEnum, DeviceEvent, DeviceId, - DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, - ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) -from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) -from common.tools.object_factory.Context import json_context, json_context_id -from common.tools.object_factory.Service import json_service_id -from common.tools.object_factory.Slice import json_slice_id -from common.tools.object_factory.Topology import json_topology_id -from common.type_checkers.Assertions import ( - validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids, - validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids, - validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology, - validate_topology_ids) +import pytest from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from context.service.database.Tools import ( - FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher) -from context.service.ContextService import ContextService -#from context.service._old_code.Populate import populate -#from context.service.rest_server.RestServer import RestServer -#from context.service.rest_server.Resources import RESOURCES -from requests import Session -from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker -from context.service.database._Base import _Base -from common.Settings import get_setting -from context.service.Engine import Engine -from context.service.database._Base import rebuild_database - -from .Objects import ( - CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, - DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2, - LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, - SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID, - POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID) - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -LOCAL_HOST = '127.0.0.1' -GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports -HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT)) # avoid privileged ports - -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT) - -#DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST -#DEFAULT_REDIS_SERVICE_PORT = 6379 -#DEFAULT_REDIS_DATABASE_ID = 0 - -#REDIS_CONFIG = { -# 'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST), -# 'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT), -# 'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID', DEFAULT_REDIS_DATABASE_ID ), -#} - -#SCENARIOS = [ -# ('db:cockroach_mb:inmemory', None, {}, None, {}), -# ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) -# ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), -#] - -#@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) -@pytest.fixture(scope='session') -def context_db_mb(request) -> Tuple[Session, MessageBroker]: - #name,db_session,mb_backend,mb_settings = request.param - #msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' - #LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - - _db_engine = Engine.get_engine() - Engine.drop_database(_db_engine) - Engine.create_database(_db_engine) - rebuild_database(_db_engine) - - _msg_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) - yield _db_engine, _msg_broker - _msg_broker.terminate() - -@pytest.fixture(scope='session') -def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - _service = ContextService(context_db_mb[0], context_db_mb[1]) - _service.start() - yield _service - _service.stop() - -#@pytest.fixture(scope='session') -#def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name -# database = context_db_mb[0] -# _rest_server = RestServer() -# for endpoint_name, resource_class, resource_url in RESOURCES: -# _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) -# _rest_server.start() -# time.sleep(1) # bring time for the server to start -# yield _rest_server -# _rest_server.shutdown() -# _rest_server.join() - -@pytest.fixture(scope='session') -def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name - _client = ContextClient() - yield _client - _client.close() - -#def do_rest_request(url : str): -# base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) -# request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) -# LOGGER.warning('Request: GET {:s}'.format(str(request_url))) -# reply = requests.get(request_url) -# LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -# assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -# return reply.json() - -# pylint: disable=redefined-outer-name, unused-argument -def test_grpc_initialize(context_client_grpc : ContextClient) -> None: - # dummy method used to initialize fixtures, database, message broker, etc. - pass - -# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- - -def test_grpc_context(context_client_grpc : ContextClient) -> None: # pylint: disable=redefined-outer-name - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, - # activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Context({:s}) not found'.format(DEFAULT_CONTEXT_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListContextIds(Empty()) - assert len(response.context_ids) == 0 - - response = context_client_grpc.ListContexts(Empty()) - assert len(response.contexts) == 0 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - wrong_context_uuid = str(uuid.uuid4()) - wrong_context_id = json_context_id(wrong_context_uuid) - with pytest.raises(grpc.RpcError) as e: - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['topology_ids'].append(json_topology_id(str(uuid.uuid4()), context_id=wrong_context_id)) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - with pytest.raises(grpc.RpcError) as e: - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['service_ids'].append(json_service_id(str(uuid.uuid4()), context_id=wrong_context_id)) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - with pytest.raises(grpc.RpcError) as e: - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['slice_ids'].append(json_slice_id(str(uuid.uuid4()), context_id=wrong_context_id)) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.slice_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True, timeout=10.0) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.name == '' - assert len(response.topology_ids) == 0 - assert len(response.service_ids) == 0 - assert len(response.slice_ids) == 0 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListContextIds(Empty()) - assert len(response.context_ids) == 1 - assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.ListContexts(Empty()) - assert len(response.contexts) == 1 - assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.contexts[0].name == '' - assert len(response.contexts[0].topology_ids) == 0 - assert len(response.contexts[0].service_ids) == 0 - assert len(response.contexts[0].slice_ids) == 0 - - # ----- Update the object ------------------------------------------------------------------------------------------ - new_context_name = 'new' - CONTEXT_WITH_NAME = copy.deepcopy(CONTEXT) - CONTEXT_WITH_NAME['name'] = new_context_name - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_NAME)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True, timeout=10.0) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.name == new_context_name - assert len(response.topology_ids) == 0 - assert len(response.service_ids) == 0 - assert len(response.slice_ids) == 0 - - # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListContextIds(Empty()) - assert len(response.context_ids) == 1 - assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.ListContexts(Empty()) - assert len(response.contexts) == 1 - assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.contexts[0].name == new_context_name - assert len(response.contexts[0].topology_ids) == 0 - assert len(response.contexts[0].service_ids) == 0 - assert len(response.contexts[0].slice_ids) == 0 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True, timeout=10.0) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- List after deleting the object ----------------------------------------------------------------------------- - response = context_client_grpc.ListContextIds(Empty()) - assert len(response.context_ids) == 0 - - response = context_client_grpc.ListContexts(Empty()) - assert len(response.contexts) == 0 - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_grpc_topology(context_client_grpc : ContextClient) -> None: # pylint: disable=redefined-outer-name - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = True, activate_device_collector = False, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # event = events_collector.get_event(block=True) - # assert isinstance(event, ContextEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) - assert len(response.topology_ids) == 0 - - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == 0 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - #CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT) - #CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID) - #response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY)) - #assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - #events = events_collector.get_events(block=True, count=2) - #assert isinstance(events[0], TopologyEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - #assert isinstance(events[1], ContextEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.name == '' - assert len(response.topology_ids) == 1 - assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert len(response.service_ids) == 0 - assert len(response.slice_ids) == 0 - - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert response.name == '' - assert len(response.device_ids) == 0 - assert len(response.link_ids) == 0 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) - assert len(response.topology_ids) == 1 - assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == 1 - assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert response.topologies[0].name == '' - assert len(response.topologies[0].device_ids) == 0 - assert len(response.topologies[0].link_ids) == 0 - - # ----- Update the object ------------------------------------------------------------------------------------------ - new_topology_name = 'new' - TOPOLOGY_WITH_NAME = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_NAME['name'] = new_topology_name - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_NAME)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, TopologyEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert response.name == new_topology_name - assert len(response.device_ids) == 0 - assert len(response.link_ids) == 0 - - # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) - assert len(response.topology_ids) == 1 - assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == 1 - assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert response.topologies[0].name == new_topology_name - assert len(response.topologies[0].device_ids) == 0 - assert len(response.topologies[0].link_ids) == 0 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, TopologyEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- List after deleting the object ----------------------------------------------------------------------------- - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) - assert len(response.topology_ids) == 0 - - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == 0 - - # ----- Clean dependencies used in the test and capture related events --------------------------------------------- - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - #event = events_collector.get_event(block=True) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - -def test_grpc_device(context_client_grpc : ContextClient) -> None: # pylint: disable=redefined-outer-name - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = True, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - #events = events_collector.get_events(block=True, count=2) - #assert isinstance(events[0], ContextEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert isinstance(events[1], TopologyEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Device({:s}) not found'.format(DEVICE_R1_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListDeviceIds(Empty()) - assert len(response.device_ids) == 0 - - response = context_client_grpc.ListDevices(Empty()) - assert len(response.devices) == 0 - - # ----- Create the object ------------------------------------------------------------------------------------------ - with pytest.raises(grpc.RpcError) as e: - WRONG_DEVICE = copy.deepcopy(DEVICE_R1) - WRONG_DEVICE_UUID = '3f03c76d-31fb-47f5-9c1d-bc6b6bfa2d08' - WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID - context_client_grpc.SetDevice(Device(**WRONG_DEVICE)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ - 'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID) - assert e.value.details() == msg - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, DeviceEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) - assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.name == '' - assert response.device_type == 'packet-router' - #assert len(response.device_config.config_rules) == 3 - assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - assert len(response.device_drivers) == 1 - assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers - #assert len(response.device_endpoints) == 3 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListDeviceIds(Empty()) - assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.ListDevices(Empty()) - assert len(response.devices) == 1 - assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.devices[0].name == '' - assert response.devices[0].device_type == 'packet-router' - #assert len(response.devices[0].device_config.config_rules) == 3 - assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - assert len(response.devices[0].device_drivers) == 1 - assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers - #assert len(response.devices[0].device_endpoints) == 3 - - # ----- Update the object ------------------------------------------------------------------------------------------ - new_device_name = 'r1' - new_device_driver = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED - DEVICE_UPDATED = copy.deepcopy(DEVICE_R1) - DEVICE_UPDATED['name'] = new_device_name - DEVICE_UPDATED['device_operational_status'] = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - DEVICE_UPDATED['device_drivers'].append(new_device_driver) - response = context_client_grpc.SetDevice(Device(**DEVICE_UPDATED)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, DeviceEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) - assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.name == 'r1' - assert response.device_type == 'packet-router' - #assert len(response.device_config.config_rules) == 3 - assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - assert len(response.device_drivers) == 2 - assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.device_drivers - assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers - #assert len(response.device_endpoints) == 3 - - # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListDeviceIds(Empty()) - assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.ListDevices(Empty()) - assert len(response.devices) == 1 - assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.devices[0].name == 'r1' - assert response.devices[0].device_type == 'packet-router' - #assert len(response.devices[0].device_config.config_rules) == 3 - assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - assert len(response.devices[0].device_drivers) == 2 - assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.devices[0].device_drivers - assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers - #assert len(response.devices[0].device_endpoints) == 3 - - # ----- Create object relation ------------------------------------------------------------------------------------- - TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID) - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check relation was created --------------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - assert len(response.link_ids) == 0 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=3) - - # assert isinstance(events[0], DeviceEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID - - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # assert isinstance(events[2], ContextEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - -""" -def test_grpc_link( - context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_db_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_db_mb[0] - - database = Database(session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID - # events = events_collector.get_events(block=True, count=4) - - # assert isinstance(events[0], ContextEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - # - # assert isinstance(events[3], DeviceEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Link({:s}) not found'.format(LINK_R1_R2_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListLinkIds(Empty()) - assert len(response.link_ids) == 0 - - response = context_client_grpc.ListLinks(Empty()) - assert len(response.links) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 80 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) - assert response.link_uuid.uuid == LINK_R1_R2_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, LinkEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) - assert response.link_uuid.uuid == LINK_R1_R2_UUID - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, LinkEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 88 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) - assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID - assert len(response.link_endpoint_ids) == 2 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListLinkIds(Empty()) - assert len(response.link_ids) == 1 - assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID - - response = context_client_grpc.ListLinks(Empty()) - assert len(response.links) == 1 - assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID - - assert len(response.links[0].link_endpoint_ids) == 2 - - # ----- Create object relation ------------------------------------------------------------------------------------- - TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID) - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_LINK)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check relation was created --------------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert len(response.device_ids) == 2 - # assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - # assert response.device_ids[1].device_uuid.uuid == DEVICE_R2_UUID - assert len(response.link_ids) == 1 - assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID - - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 88 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=5) - # - # assert isinstance(events[0], LinkEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID - # - # assert isinstance(events[1], DeviceEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID - # - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID - # - # assert isinstance(events[3], TopologyEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # - # assert isinstance(events[4], ContextEvent) - # assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 -""" - -""" -def test_grpc_service( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_db_mb[0] - # ----- Clean the database ----------------------------------------------------------------------------------------- - database = Database(Session) - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID - # events = events_collector.get_events(block=True, count=4) - # - # assert isinstance(events[0], ContextEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - # - # assert isinstance(events[3], DeviceEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - LOGGER.info('----------------') - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID) - LOGGER.info('----------------') - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) - assert len(response.service_ids) == 0 - LOGGER.info('----------------') - - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - LOGGER.info('----------------') - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 80 - - # ----- Create the object ------------------------------------------------------------------------------------------ - with pytest.raises(grpc.RpcError) as e: - WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2) - WRONG_SERVICE['service_endpoint_ids'][0]\ - ['topology_id']['context_id']['context_uuid']['uuid'] = 'ca1ea172-728f-441d-972c-feeae8c9bffc' - context_client_grpc.SetService(Service(**WRONG_SERVICE)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(ca1ea172-728f-441d-972c-feeae8c9bffc) is invalid; '\ - 'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=2) - - assert isinstance(events[0], ServiceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[1], ContextEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ServiceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 108 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) - assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM - assert len(response.service_endpoint_ids) == 2 - assert len(response.service_constraints) == 2 - assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED - assert len(response.service_config.config_rules) == 3 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) - assert len(response.service_ids) == 1 - assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID - - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 1 - assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM - assert len(response.services[0].service_endpoint_ids) == 2 - assert len(response.services[0].service_constraints) == 2 - assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED - assert len(response.services[0].service_config.config_rules) == 3 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=5) - - assert isinstance(events[0], ServiceEvent) - assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[1], DeviceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[3], TopologyEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[4], ContextEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 -""" - -""" -def test_grpc_connection( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_db_mb[0] - - database = Database(Session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R3)) - assert response.device_uuid.uuid == DEVICE_R3_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R2_R3)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R2_R3_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R2_R3_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R3)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R3_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R3_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - events = events_collector.get_events(block=True, count=11) - - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[4].device_id.device_uuid.uuid == DEVICE_R3_UUID - - assert isinstance(events[5], ServiceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[5].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[5].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[6], ContextEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[6].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[7], ServiceEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[7].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[7].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID - - assert isinstance(events[8], ContextEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[9], ServiceEvent) - assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[9].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[9].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - - assert isinstance(events[10], ContextEvent) - assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[10].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Connection({:s}) not found'.format(CONNECTION_R1_R3_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connection_ids) == 0 - - response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connections) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 187 - - # ----- Create the object ------------------------------------------------------------------------------------------ - with pytest.raises(grpc.RpcError) as e: - WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3) - WRONG_CONNECTION['path_hops_endpoint_ids'][0]\ - ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' - context_client_grpc.SetConnection(Connection(**WRONG_CONNECTION)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - # TODO: should we check that all endpoints belong to same topology? - # TODO: should we check that endpoints form links over the topology? - msg = 'EndPoint({:s}/{:s}:wrong-context-uuid/{:s}) not found'.format( - DEVICE_R1_UUID, WRONG_CONNECTION['path_hops_endpoint_ids'][0]['endpoint_uuid']['uuid'], DEFAULT_TOPOLOGY_UUID) - assert e.value.details() == msg - - response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) - assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) - assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 203 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - assert response.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - assert len(response.path_hops_endpoint_ids) == 6 - assert len(response.sub_service_ids) == 2 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connection_ids) == 1 - assert response.connection_ids[0].connection_uuid.uuid == CONNECTION_R1_R3_UUID - - response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connections) == 1 - assert response.connections[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - assert len(response.connections[0].path_hops_endpoint_ids) == 6 - assert len(response.connections[0].sub_service_ids) == 2 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R2_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R3_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=9) - - assert isinstance(events[0], ConnectionEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - assert isinstance(events[1], ServiceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - - assert isinstance(events[2], ServiceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[2].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID - - assert isinstance(events[3], ServiceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[3].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[5], DeviceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[5].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[6], DeviceEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[6].device_id.device_uuid.uuid == DEVICE_R3_UUID - - assert isinstance(events[7], TopologyEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[7].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[7].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[8], ContextEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - -def test_grpc_policy( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] - - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client_grpc) - #events_collector.start() - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - POLICY_ID = 'no-uuid' - DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}} - - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID)) - - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListPolicyRuleIds(Empty()) - assert len(response.policyRuleIdList) == 0 - - response = context_client_grpc.ListPolicyRules(Empty()) - assert len(response.policyRules) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) - assert response.uuid.uuid == POLICY_RULE_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=1) - # assert isinstance(events[0], PolicyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) - assert response.uuid.uuid == POLICY_RULE_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID)) - assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListPolicyRuleIds(Empty()) - assert len(response.policyRuleIdList) == 1 - assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID - - response = context_client_grpc.ListPolicyRules(Empty()) - assert len(response.policyRules) == 1 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=2) - - # assert isinstance(events[0], PolicyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID - - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - - -# ----- Test misc. Context internal tools ------------------------------------------------------------------------------ - -def test_tools_fast_string_hasher(): - with pytest.raises(TypeError) as e: - fast_hasher(27) - assert str(e.value) == "data(27) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'int'>" - - with pytest.raises(TypeError) as e: - fast_hasher({27}) - assert str(e.value) == "data({27}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" - - with pytest.raises(TypeError) as e: - fast_hasher({'27'}) - assert str(e.value) == "data({'27'}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" - - with pytest.raises(TypeError) as e: - fast_hasher([27]) - assert str(e.value) == "data[0](27) must be " + FASTHASHER_ITEM_ACCEPTED_FORMAT + ", found <class 'int'>" - - fast_hasher('hello-world') - fast_hasher('hello-world'.encode('UTF-8')) - fast_hasher(['hello', 'world']) - fast_hasher(('hello', 'world')) - fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')]) - fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8'))) -""" \ No newline at end of file +from ._test_context import grpc_context +from ._test_topology import grpc_topology +from ._test_device import grpc_device +from ._test_link import grpc_link +#from ._test_service import grpc_service +#from ._test_slice import grpc_slice +#from ._test_connection import grpc_connection +#from ._test_policy import grpc_policy + +def test_grpc_context(context_client_grpc : ContextClient) -> None: + grpc_context(context_client_grpc) + +@pytest.mark.depends(on=['test_grpc_context']) +def test_grpc_topology(context_client_grpc : ContextClient) -> None: + grpc_topology(context_client_grpc) + +@pytest.mark.depends(on=['test_grpc_topology']) +def test_grpc_device(context_client_grpc : ContextClient) -> None: + grpc_device(context_client_grpc) + +@pytest.mark.depends(on=['test_grpc_device']) +def test_grpc_link(context_client_grpc : ContextClient) -> None: + grpc_link(context_client_grpc) + +#@pytest.mark.depends(on=['test_grpc_link']) +#def test_grpc_service(context_client_grpc : ContextClient) -> None: +# grpc_service(context_client_grpc) + +#@pytest.mark.depends(on=['test_grpc_service']) +#def test_grpc_slice(context_client_grpc : ContextClient) -> None: +# grpc_slice(context_client_grpc) + +#@pytest.mark.depends(on=['test_grpc_slice']) +#def test_grpc_connection(context_client_grpc : ContextClient) -> None: +# grpc_connection(context_client_grpc) + +#@pytest.mark.depends(on=['test_grpc_connection']) +#def test_grpc_policy(context_client_grpc : ContextClient) -> None: +# grpc_policy(context_client_grpc) -- GitLab From 5b1579a770f8f6c63894e61893ed1227bc46af80 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 4 Jan 2023 18:53:17 +0000 Subject: [PATCH 021/158] Common: - updated coveragerc template file - extended and improved rpc wrapper set of declarable exceptions - updated default context and topology definition constants --- coverage/.coveragerc.template | 4 ++-- src/common/Constants.py | 6 +++--- src/common/rpc_method_wrapper/ServiceExceptions.py | 12 ++++++++++-- src/common/tools/object_factory/Service.py | 8 ++++---- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/coverage/.coveragerc.template b/coverage/.coveragerc.template index e5e634c2c..8863d4d6e 100644 --- a/coverage/.coveragerc.template +++ b/coverage/.coveragerc.template @@ -1,5 +1,5 @@ [run] -data_file = ~/teraflow/controller/coverage/.coverage +data_file = ~/tfs-ctrl/coverage/.coverage source = . omit = */proto/* @@ -12,7 +12,7 @@ exclude_lines = raise\ NotImplementedError [html] -directory = ~/teraflow/controller/coverage/html_report +directory = ~/tfs-ctrl/coverage/html_report [xml] output = ~/teraflow/controller/coverage/report.xml diff --git a/src/common/Constants.py b/src/common/Constants.py index d606c0d03..9f015b8c7 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -33,9 +33,9 @@ DEFAULT_METRICS_PORT = 9192 DEFAULT_CONTEXT_NAME = 'admin' DEFAULT_TOPOLOGY_NAME = 'admin' # contains the detailed local topology INTERDOMAIN_TOPOLOGY_NAME = 'inter' # contains the abstract inter-domain topology -DEFAULT_CONTEXT_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_CONTEXT_NAME )) -DEFAULT_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_TOPOLOGY_NAME )) -INTERDOMAIN_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, INTERDOMAIN_TOPOLOGY_NAME)) +#DEFAULT_CONTEXT_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_CONTEXT_NAME )) +#DEFAULT_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_TOPOLOGY_NAME )) +#INTERDOMAIN_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, INTERDOMAIN_TOPOLOGY_NAME)) # Default service names class ServiceNameEnum(Enum): diff --git a/src/common/rpc_method_wrapper/ServiceExceptions.py b/src/common/rpc_method_wrapper/ServiceExceptions.py index e516953c5..369565cf8 100644 --- a/src/common/rpc_method_wrapper/ServiceExceptions.py +++ b/src/common/rpc_method_wrapper/ServiceExceptions.py @@ -13,7 +13,7 @@ # limitations under the License. import grpc -from typing import Iterable, Union +from typing import Iterable, List, Tuple, Union class ServiceException(Exception): def __init__( @@ -21,7 +21,7 @@ class ServiceException(Exception): ) -> None: self.code = code if isinstance(extra_details, str): extra_details = [extra_details] - self.details = '; '.join(map(str, [details] + extra_details)) + self.details = '; '.join([str(item) for item in ([details] + extra_details)]) super().__init__(self.details) class NotFoundException(ServiceException): @@ -45,6 +45,14 @@ class InvalidArgumentException(ServiceException): details = '{:s}({:s}) is invalid'.format(str(argument_name), str(argument_value)) super().__init__(grpc.StatusCode.INVALID_ARGUMENT, details, extra_details=extra_details) +class InvalidArgumentsException(ServiceException): + def __init__( + self, arguments : List[Tuple[str, str]], extra_details : Union[str, Iterable[str]] = None + ) -> None: + str_arguments = ', '.join(['{:s}({:s})'.format(name, value) for name,value in arguments]) + details = 'Arguments {:s} are invalid'.format(str_arguments) + super().__init__(grpc.StatusCode.INVALID_ARGUMENT, details, extra_details=extra_details) + class OperationFailedException(ServiceException): def __init__( self, operation : str, extra_details : Union[str, Iterable[str]] = None diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py index be8eefe5b..66785fbb4 100644 --- a/src/common/tools/object_factory/Service.py +++ b/src/common/tools/object_factory/Service.py @@ -14,7 +14,7 @@ import copy from typing import Dict, List, Optional -from common.Constants import DEFAULT_CONTEXT_UUID +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum from common.tools.object_factory.Context import json_context_id @@ -44,7 +44,7 @@ def json_service( def json_service_l3nm_planned( service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], - config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME ): return json_service( @@ -54,7 +54,7 @@ def json_service_l3nm_planned( def json_service_tapi_planned( service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], - config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME ): return json_service( @@ -64,7 +64,7 @@ def json_service_tapi_planned( def json_service_p4_planned( service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], - config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME ): return json_service( -- GitLab From e719962bf18e4c8d8f3e65a731ca57e4e05686c1 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 4 Jan 2023 18:53:37 +0000 Subject: [PATCH 022/158] Proto: - added field "name" to endpoint --- proto/context.proto | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/proto/context.proto b/proto/context.proto index db0c81381..ce7534c80 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -407,9 +407,10 @@ message EndPointId { message EndPoint { EndPointId endpoint_id = 1; - string endpoint_type = 2; - repeated kpi_sample_types.KpiSampleType kpi_sample_types = 3; - Location endpoint_location = 4; + string name = 2; + string endpoint_type = 3; + repeated kpi_sample_types.KpiSampleType kpi_sample_types = 4; + Location endpoint_location = 5; } -- GitLab From d649fe785b3faac36b7cf3da94b10ce57d44c22a Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 4 Jan 2023 18:55:37 +0000 Subject: [PATCH 023/158] Context component: - cleaned up script run tests locally - temporarily added script to automate test & coverage reporting - reorganized unitary tests - migration in progress to use single-column primary-key for main entities - intermediate backup ; work in progress --- scripts/run_tests_locally-context.sh | 18 +- ...geFeedExample.py => ChangeFeedExample.txt} | 1 - src/context/service/Constants.py | 5 +- .../service/ContextServiceServicerImpl.py | 367 ++++-------------- src/context/service/Engine.py | 4 +- .../service/database/methods/Context.py | 67 ++-- .../service/database/methods/Device.py | 136 ++++--- .../service/database/methods/Service.py | 263 +++++++++++++ .../service/database/methods/Topology.py | 105 +++-- .../service/database/methods/uuids/Context.py | 33 ++ .../service/database/methods/uuids/Device.py | 33 ++ .../database/methods/uuids/EndPoint.py | 41 ++ .../service/database/methods/uuids/Link.py | 33 ++ .../database/methods/uuids/Topology.py | 37 ++ .../database/methods/uuids/_Builder.py | 44 +++ .../database/methods/uuids/__init__.py | 13 + .../database/models/ConfigRuleModel.py | 37 +- .../service/database/models/ContextModel.py | 15 +- .../service/database/models/DeviceModel.py | 16 +- .../service/database/models/EndPointModel.py | 64 +-- .../service/database/models/LinkModel.py | 1 + .../service/database/models/RelationModels.py | 148 +++---- .../service/database/models/ServiceModel.py | 130 ++----- .../service/database/models/TopologyModel.py | 18 +- .../database/models/enums/ServiceStatus.py | 26 ++ .../database/models/enums/ServiceType.py | 26 ++ src/context/tests/Objects.py | 30 +- .../{test_unitary.py => __test_unitary.py} | 34 +- src/context/tests/_test_link.py | 77 ++-- src/context/tests/_test_service.py | 191 ++++----- src/context/tests/conftest.py | 45 +-- .../{_test_context.py => test_context.py} | 102 ++--- .../tests/{_test_device.py => test_device.py} | 165 ++++---- .../{_test_topology.py => test_topology.py} | 140 +++---- test-context.sh | 53 +++ 35 files changed, 1407 insertions(+), 1111 deletions(-) rename src/context/service/{ChangeFeedExample.py => ChangeFeedExample.txt} (99%) create mode 100644 src/context/service/database/methods/Service.py create mode 100644 src/context/service/database/methods/uuids/Context.py create mode 100644 src/context/service/database/methods/uuids/Device.py create mode 100644 src/context/service/database/methods/uuids/EndPoint.py create mode 100644 src/context/service/database/methods/uuids/Link.py create mode 100644 src/context/service/database/methods/uuids/Topology.py create mode 100644 src/context/service/database/methods/uuids/_Builder.py create mode 100644 src/context/service/database/methods/uuids/__init__.py create mode 100644 src/context/service/database/models/enums/ServiceStatus.py create mode 100644 src/context/service/database/models/enums/ServiceType.py rename src/context/tests/{test_unitary.py => __test_unitary.py} (64%) rename src/context/tests/{_test_context.py => test_context.py} (55%) rename src/context/tests/{_test_device.py => test_device.py} (56%) rename src/context/tests/{_test_topology.py => test_topology.py} (57%) create mode 100755 test-context.sh diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index 5b6c53aa8..8b0c82b3e 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -20,8 +20,6 @@ # If not already set, set the name of the Kubernetes namespace to deploy to. export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} -#export TFS_K8S_HOSTNAME="tfs-vm" - ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -29,24 +27,14 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} PROJECTDIR=`pwd` cd $PROJECTDIR/src -#RCFILE=$PROJECTDIR/coverage/.coveragerc - -#kubectl --namespace $TFS_K8S_NAMESPACE expose deployment contextservice --name=redis-tests --port=6379 --type=NodePort -#export REDIS_SERVICE_HOST=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.clusterIP}') -#export REDIS_SERVICE_HOST=$(kubectl get node $TFS_K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -#export REDIS_SERVICE_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}') +RCFILE=$PROJECTDIR/coverage/.coveragerc #export CRDB_URI="cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_test?sslmode=require" export CRDB_URI="cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs_test?sslmode=require" export PYTHONPATH=/home/tfs/tfs-ctrl/src # Run unitary tests and analyze coverage of code at same time -#coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ -# context/tests/test_unitary.py - -# --log-level=INFO -o log_cli=true --durations=0 -pytest --verbose --maxfail=1 \ +# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ context/tests/test_unitary.py \ context/tests/test_hasher.py - -#kubectl --namespace $TFS_K8S_NAMESPACE delete service redis-tests diff --git a/src/context/service/ChangeFeedExample.py b/src/context/service/ChangeFeedExample.txt similarity index 99% rename from src/context/service/ChangeFeedExample.py rename to src/context/service/ChangeFeedExample.txt index 2bd46b546..679a7c716 100644 --- a/src/context/service/ChangeFeedExample.py +++ b/src/context/service/ChangeFeedExample.txt @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - @safe_and_metered_rpc_method(METRICS, LOGGER) def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: pass diff --git a/src/context/service/Constants.py b/src/context/service/Constants.py index 25790fe29..1eb274cf0 100644 --- a/src/context/service/Constants.py +++ b/src/context/service/Constants.py @@ -16,14 +16,15 @@ TOPIC_CONNECTION = 'connection' TOPIC_CONTEXT = 'context' TOPIC_DEVICE = 'device' TOPIC_LINK = 'link' -TOPIC_POLICY = 'policy' +#TOPIC_POLICY = 'policy' TOPIC_SERVICE = 'service' TOPIC_SLICE = 'slice' TOPIC_TOPOLOGY = 'topology' TOPICS = { TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, - TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY + #TOPIC_POLICY, + TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY } CONSUME_TIMEOUT = 0.5 # seconds diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 5075d8889..44409bd0c 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -13,34 +13,36 @@ # limitations under the License. -import grpc, json, logging, operator, sqlalchemy, threading, time, uuid -from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker -from sqlalchemy.dialects.postgresql import UUID, insert -from typing import Dict, Iterator, List, Optional, Set, Tuple, Union - +import grpc, json, logging, sqlalchemy +#from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker +#from sqlalchemy.dialects.postgresql import UUID, insert +from typing import Iterator from common.message_broker.MessageBroker import MessageBroker #from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import ( Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, Context, ContextEvent, ContextId, ContextIdList, ContextList, Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList, - Empty, EventTypeEnum, + Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList, Slice, SliceEvent, SliceId, SliceIdList, SliceList, - Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList, - ConfigActionEnum, Constraint) + Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList) #from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule from common.proto.context_pb2_grpc import ContextServiceServicer from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer -from common.tools.object_factory.Context import json_context_id +#from common.tools.object_factory.Context import json_context_id from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import ( - InvalidArgumentException, NotFoundException, OperationFailedException) -from context.service.database.methods.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set -from context.service.database.methods.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set -from context.service.database.methods.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set -from context.service.database.methods.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set +#from common.rpc_method_wrapper.ServiceExceptions import ( +# InvalidArgumentException, NotFoundException, OperationFailedException) +from .database.methods.Context import ( + context_delete, context_get, context_list_ids, context_list_objs, context_set) +from .database.methods.Device import ( + device_delete, device_get, device_list_ids, device_list_objs, device_set) +#from .database.methods.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set +#from .database.methods.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set +from .database.methods.Topology import ( + topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set) #from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string #from context.service.Database import Database #from context.service.database.ConfigModel import ( @@ -64,8 +66,8 @@ from context.service.database.methods.Topology import topology_delete, topology_ #from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status #from context.service.database.TopologyModel import TopologyModel from .Constants import ( - CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, - TOPIC_SLICE, TOPIC_TOPOLOGY) + CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, #TOPIC_POLICY, + TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY) #from .ChangeFeedClient import ChangeFeedClient LOGGER = logging.getLogger(__name__) @@ -110,10 +112,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId: - updated = context_set(self.db_engine, request) + context_id,updated = context_set(self.db_engine, request) #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': request.context_id}) - return request.context_id + #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id}) + return context_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty: @@ -144,10 +146,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId: - updated = topology_set(self.db_engine, request) + topology_id,updated = topology_set(self.db_engine, request) #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': request.topology_id}) - return request.topology_id + #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id}) + return topology_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: @@ -178,10 +180,10 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: - updated = device_set(self.db_engine, request) + device_id,updated = device_set(self.db_engine, request) #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': request.device_id}) - return request.device_id + #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id}) + return device_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: @@ -198,31 +200,31 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Link ------------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: - return link_list_ids(self.db_engine) +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: +# return link_list_ids(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) - def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: - return link_list_objs(self.db_engine) +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: +# return link_list_objs(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: - return link_get(self.db_engine, request) +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: +# return link_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) - def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: - updated = link_set(self.db_engine, request) - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': request.link_id}) - return request.link_id +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: +# link_id,updated = link_set(self.db_engine, request) +# #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) +# return link_id - @safe_and_metered_rpc_method(METRICS, LOGGER) - def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: - deleted = link_delete(self.db_engine, request) - #if deleted: - # notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) - return Empty() +# @safe_and_metered_rpc_method(METRICS, LOGGER) +# def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: +# deleted = link_delete(self.db_engine, request) +# #if deleted: +# # notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) +# return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: @@ -230,230 +232,33 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer yield LinkEvent(**json.loads(message.content)) -# # ----- Service ---------------------------------------------------------------------------------------------------- -# + # ----- Service ---------------------------------------------------------------------------------------------------- + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList: -# context_uuid = request.context_uuid.uuid -# -# with self.session() as session: -# db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() -# return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services]) -# +# return service_list_ids(self.db_engine, request) + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: -# context_uuid = request.context_uuid.uuid -# -# with self.session() as session: -# db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() -# return ServiceList(services=[db_service.dump() for db_service in db_services]) -# -# -# +# return service_list_objs(self.db_engine, request) + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service: -# service_uuid = request.service_uuid.uuid -# with self.session() as session: -# result = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none() -# -# if not result: -# raise NotFoundException(ServiceModel.__name__.replace('Model', ''), service_uuid) -# -# return Service(**result.dump()) -# -# def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int -# ) -> Tuple[Union_ConstraintModel, bool]: -# with self.session() as session: -# -# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) -# -# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) -# if parser is None: -# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( -# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) -# -# # create specific constraint -# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) -# str_constraint_id = str(uuid.uuid4()) -# LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) -# # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) -# # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') -# -# # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( -# # database, constraint_class, str_constraint_key, constraint_data) -# constraint_data[constraint_class.main_pk_name()] = str_constraint_id -# db_new_constraint = constraint_class(**constraint_data) -# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) -# db_specific_constraint, updated = result -# -# # create generic constraint -# # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) -# constraint_data = { -# 'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind -# } -# -# db_new_constraint = ConstraintModel(**constraint_data) -# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) -# db_constraint, updated = result -# -# return db_constraint, updated -# -# def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints -# ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: -# with self.session() as session: -# # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') -# # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) -# result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() -# created = None -# if result: -# created = True -# session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() -# db_constraints = ConstraintsModel(constraints_uuid=service_uuid) -# session.add(db_constraints) -# -# db_objects = [(db_constraints, created)] -# -# for position,grpc_constraint in enumerate(grpc_constraints): -# result : Tuple[ConstraintModel, bool] = self.set_constraint( -# db_constraints, grpc_constraint, position) -# db_constraint, updated = result -# db_objects.append((db_constraint, updated)) -# -# return db_objects -# +# return service_get(self.db_engine, request) + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: -# with self.lock: -# with self.session() as session: -# -# context_uuid = request.service_id.context_id.context_uuid.uuid -# # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) -# db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() -# -# for i,endpoint_id in enumerate(request.service_endpoint_ids): -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: -# raise InvalidArgumentException( -# 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), -# endpoint_topology_context_uuid, -# ['should be == {:s}({:s})'.format( -# 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) -# -# service_uuid = request.service_id.service_uuid.uuid -# # str_service_key = key_to_str([context_uuid, service_uuid]) -# -# constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints) -# db_constraints = constraints_result[0][0] -# -# config_rules = grpc_config_rules_to_raw(request.service_config.config_rules) -# running_config_result = update_config(self.database, str_service_key, 'running', config_rules) -# db_running_config = running_config_result[0][0] -# -# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { -# 'context_fk' : db_context, -# 'service_uuid' : service_uuid, -# 'service_type' : grpc_to_enum__service_type(request.service_type), -# 'service_constraints_fk': db_constraints, -# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), -# 'service_config_fk' : db_running_config, -# }) -# db_service, updated = result -# -# for i,endpoint_id in enumerate(request.service_endpoint_ids): -# endpoint_uuid = endpoint_id.endpoint_uuid.uuid -# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid -# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# -# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) -# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') -# -# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) -# -# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') -# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( -# self.database, ServiceEndPointModel, str_service_endpoint_key, { -# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) -# #db_service_endpoint, service_endpoint_created = result -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_service_id = db_service.dump_id() -# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) -# return ServiceId(**dict_service_id) -# context_uuid = request.service_id.context_id.context_uuid.uuid -# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) -# -# for i,endpoint_id in enumerate(request.service_endpoint_ids): -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: -# raise InvalidArgumentException( -# 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), -# endpoint_topology_context_uuid, -# ['should be == {:s}({:s})'.format( -# 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) -# -# service_uuid = request.service_id.service_uuid.uuid -# str_service_key = key_to_str([context_uuid, service_uuid]) -# -# constraints_result = set_constraints( -# self.database, str_service_key, 'service', request.service_constraints) -# db_constraints = constraints_result[0][0] -# -# running_config_rules = update_config( -# self.database, str_service_key, 'service', request.service_config.config_rules) -# db_running_config = running_config_rules[0][0] -# -# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { -# 'context_fk' : db_context, -# 'service_uuid' : service_uuid, -# 'service_type' : grpc_to_enum__service_type(request.service_type), -# 'service_constraints_fk': db_constraints, -# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), -# 'service_config_fk' : db_running_config, -# }) -# db_service, updated = result -# -# for i,endpoint_id in enumerate(request.service_endpoint_ids): -# endpoint_uuid = endpoint_id.endpoint_uuid.uuid -# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid -# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# -# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) -# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') -# -# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) -# -# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') -# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( -# self.database, ServiceEndPointModel, str_service_endpoint_key, { -# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) -# #db_service_endpoint, service_endpoint_created = result -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_service_id = db_service.dump_id() -# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) -# return ServiceId(**dict_service_id) -# +# service_id,updated = service_set(self.db_engine, request) +# #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# #notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) +# return service_id + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: -# with self.lock: -# context_uuid = request.context_id.context_uuid.uuid -# service_uuid = request.service_uuid.uuid -# db_service = ServiceModel(self.database, key_to_str([context_uuid, service_uuid]), auto_load=False) -# found = db_service.load() -# if not found: return Empty() -# -# dict_service_id = db_service.dump_id() -# db_service.delete() -# -# event_type = EventTypeEnum.EVENTTYPE_REMOVE -# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) -# return Empty() +# deleted = service_delete(self.db_engine, request) +# #if deleted: +# # notify_event(self.messagebroker, TOPIC_SERVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'service_id': request}) +# return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: @@ -461,8 +266,8 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer yield ServiceEvent(**json.loads(message.content)) -# # ----- Slice ---------------------------------------------------------------------------------------------------- -# + # ----- Slice ---------------------------------------------------------------------------------------------------- + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList: # with self.lock: @@ -470,7 +275,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) # db_slices = sorted(db_slices, key=operator.attrgetter('pk')) # return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices]) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList: # with self.lock: @@ -478,7 +283,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) # db_slices = sorted(db_slices, key=operator.attrgetter('pk')) # return SliceList(slices=[db_slice.dump() for db_slice in db_slices]) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice: # with self.lock: @@ -487,7 +292,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # return Slice(**db_slice.dump( # include_endpoint_ids=True, include_constraints=True, include_config_rules=True, # include_service_ids=True, include_subslice_ids=True)) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: # with self.lock: @@ -572,7 +377,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # dict_slice_id = db_slice.dump_id() # notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) # return SliceId(**dict_slice_id) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: # with self.lock: @@ -621,7 +426,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # dict_slice_id = db_slice.dump_id() # notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) # return SliceId(**dict_slice_id) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: # with self.lock: @@ -644,8 +449,8 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer yield SliceEvent(**json.loads(message.content)) -# # ----- Connection ------------------------------------------------------------------------------------------------- -# + # ----- Connection ------------------------------------------------------------------------------------------------- + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: # with self.session() as session: @@ -658,7 +463,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) # db_connections = sorted(db_connections, key=operator.attrgetter('pk')) # return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections]) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListConnections(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: # with self.lock: @@ -667,13 +472,13 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) # db_connections = sorted(db_connections, key=operator.attrgetter('pk')) # return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections]) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection: # with self.lock: # db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid) # return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True)) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId: # with self.lock: @@ -712,7 +517,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # dict_connection_id = db_connection.dump_id() # notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) # return ConnectionId(**dict_connection_id) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty: # with self.lock: @@ -733,29 +538,29 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer yield ConnectionEvent(**json.loads(message.content)) -# # ----- Policy ----------------------------------------------------------------------------------------------------- -# + # ----- Policy ----------------------------------------------------------------------------------------------------- + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: # with self.lock: # db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) # db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) # return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules]) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList: # with self.lock: # db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) # db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) # return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules]) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: # with self.lock: # policy_rule_uuid = request.uuid.uuid # db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid) # return PolicyRule(**db_policy_rule.dump()) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: # with self.lock: @@ -764,13 +569,13 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid'] # result: Tuple[PolicyRuleModel, bool] = update_or_create_object( # self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)}) -# db_policy, updated = result # pylint: disable=unused-variable +# db_policy, updated = result # # #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE # dict_policy_id = db_policy.dump_id() # #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) # return PolicyRuleId(**dict_policy_id) -# + # @safe_and_metered_rpc_method(METRICS, LOGGER) # def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty: # with self.lock: diff --git a/src/context/service/Engine.py b/src/context/service/Engine.py index 151f33751..a1aedc3ae 100644 --- a/src/context/service/Engine.py +++ b/src/context/service/Engine.py @@ -28,13 +28,13 @@ class Engine: try: engine = sqlalchemy.create_engine( crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True) - except: # pylint: disable=bare-except + except: # pylint: disable=bare-except # pragma: no cover LOGGER.exception('Failed to connect to database: {:s}'.format(crdb_uri)) return None try: Engine.create_database(engine) - except: # pylint: disable=bare-except + except: # pylint: disable=bare-except # pragma: no cover LOGGER.exception('Failed to check/create to database: {:s}'.format(engine.url)) return None diff --git a/src/context/service/database/methods/Context.py b/src/context/service/database/methods/Context.py index 8f1c2ee23..fc53426e3 100644 --- a/src/context/service/database/methods/Context.py +++ b/src/context/service/database/methods/Context.py @@ -12,15 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time +import logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import Context, ContextId, ContextIdList, ContextList -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.tools.object_factory.Context import json_context_id from context.service.database.models.ContextModel import ContextModel +from .uuids.Context import context_get_uuid + +LOGGER = logging.getLogger(__name__) def context_list_ids(db_engine : Engine) -> ContextIdList: def callback(session : Session) -> List[Dict]: @@ -37,46 +41,44 @@ def context_list_objs(db_engine : Engine) -> ContextList: return ContextList(contexts=run_transaction(sessionmaker(bind=db_engine), callback)) def context_get(db_engine : Engine, request : ContextId) -> Context: - context_uuid = request.context_uuid.uuid + context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[ContextModel] = session.query(ContextModel)\ .filter_by(context_uuid=context_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) - if obj is None: raise NotFoundException('Context', context_uuid) + if obj is None: + raw_context_uuid = request.context_uuid.uuid + raise NotFoundException('Context', raw_context_uuid, extra_details=[ + 'context_uuid generated was: {:s}'.format(context_uuid) + ]) return Context(**obj) -def context_set(db_engine : Engine, request : Context) -> bool: - context_uuid = request.context_id.context_uuid.uuid +def context_set(db_engine : Engine, request : Context) -> Tuple[ContextId, bool]: context_name = request.name + if len(context_name) == 0: context_name = request.context_id.context_uuid.uuid + context_uuid = context_get_uuid(request.context_id, context_name=context_name, allow_random=True) + + # Ignore request.topology_ids, request.service_ids, and request.slice_ids. They are used + # for retrieving topologies, services and slices added into the context. Explicit addition + # into the context is done automatically qhen creating the topology, service or slice + # specifying the associated context. + + if len(request.topology_ids) > 0: # pragma: no cover + LOGGER.warning('Items in field "topology_ids" ignored. This field is used for retrieval purposes only.') - for i, topology_id in enumerate(request.topology_ids): - topology_context_uuid = topology_id.context_id.context_uuid.uuid - if topology_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + if len(request.service_ids) > 0: # pragma: no cover + LOGGER.warning('Items in field "service_ids" ignored. This field is used for retrieval purposes only.') - for i, service_id in enumerate(request.service_ids): - service_context_uuid = service_id.context_id.context_uuid.uuid - if service_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + if len(request.slice_ids) > 0: # pragma: no cover + LOGGER.warning('Items in field "slice_ids" ignored. This field is used for retrieval purposes only.') - for i, slice_id in enumerate(request.slice_ids): - slice_context_uuid = slice_id.context_id.context_uuid.uuid - if slice_context_uuid != context_uuid: - raise InvalidArgumentException( - 'request.slice_ids[{:d}].context_id.context_uuid.uuid'.format(i), slice_context_uuid, - ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)]) + context_data = [{ + 'context_uuid': context_uuid, + 'context_name': context_name, + }] def callback(session : Session) -> None: - context_data = [{ - 'context_uuid': context_uuid, - 'context_name': context_name, - 'created_at' : time.time(), - }] stmt = insert(ContextModel).values(context_data) stmt = stmt.on_conflict_do_update( index_elements=[ContextModel.context_uuid], @@ -85,10 +87,11 @@ def context_set(db_engine : Engine, request : Context) -> bool: session.execute(stmt) run_transaction(sessionmaker(bind=db_engine), callback) - return False # TODO: improve and check if created/updated + updated = False # TODO: improve and check if created/updated + return ContextId(**json_context_id(context_uuid)),updated def context_delete(db_engine : Engine, request : ContextId) -> bool: - context_uuid = request.context_uuid.uuid + context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() return num_deleted > 0 diff --git a/src/context/service/database/methods/Device.py b/src/context/service/database/methods/Device.py index e7dc3dadb..39ae98de0 100644 --- a/src/context/service/database/methods/Device.py +++ b/src/context/service/database/methods/Device.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time from sqlalchemy import delete from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine @@ -21,15 +20,18 @@ from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import Device, DeviceId, DeviceIdList, DeviceList from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException -from common.tools.grpc.Tools import grpc_message_to_json_string -from context.service.database.models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel +from common.tools.object_factory.Device import json_device_id +#from common.tools.grpc.Tools import grpc_message_to_json_string +#from context.service.database.models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel from context.service.database.models.DeviceModel import DeviceModel from context.service.database.models.EndPointModel import EndPointModel from context.service.database.models.RelationModels import TopologyDeviceModel -from context.service.database.models.enums.ConfigAction import grpc_to_enum__config_action +#from context.service.database.models.enums.ConfigAction import grpc_to_enum__config_action from context.service.database.models.enums.DeviceDriver import grpc_to_enum__device_driver from context.service.database.models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status from context.service.database.models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type +from .uuids.Device import device_get_uuid +from .uuids.EndPoint import endpoint_get_uuid def device_list_ids(db_engine : Engine) -> DeviceIdList: def callback(session : Session) -> List[Dict]: @@ -46,115 +48,121 @@ def device_list_objs(db_engine : Engine) -> DeviceList: return DeviceList(devices=run_transaction(sessionmaker(bind=db_engine), callback)) def device_get(db_engine : Engine, request : DeviceId) -> Device: - device_uuid = request.device_uuid.uuid + device_uuid = device_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[DeviceModel] = session.query(DeviceModel)\ .filter_by(device_uuid=device_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) - if obj is None: raise NotFoundException('Device', device_uuid) + if obj is None: + raw_device_uuid = request.device_uuid.uuid + raise NotFoundException('Device', raw_device_uuid, extra_details=[ + 'device_uuid generated was: {:s}'.format(device_uuid) + ]) return Device(**obj) def device_set(db_engine : Engine, request : Device) -> bool: - device_uuid = request.device_id.device_uuid.uuid - device_name = request.name + raw_device_uuid = request.device_id.device_uuid.uuid + raw_device_name = request.name + device_name = request.device_id.device_uuid.uuid if len(raw_device_name) == 0 else raw_device_name + device_uuid = device_get_uuid(request.device_id, device_name=device_name, allow_random=True) + device_type = request.device_type oper_status = grpc_to_enum__device_operational_status(request.device_operational_status) device_drivers = [grpc_to_enum__device_driver(d) for d in request.device_drivers] - topology_keys : Set[Tuple[str, str]] = set() + topology_uuids : Set[str] = set() related_topologies : List[Dict] = list() endpoints_data : List[Dict] = list() for i, endpoint in enumerate(request.device_endpoints): endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid - if device_uuid != endpoint_device_uuid: + if endpoint_device_uuid not in {raw_device_uuid, device_uuid}: raise InvalidArgumentException( 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, - ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) + ['should be == request.device_id.device_uuid.uuid({:s})'.format(raw_device_uuid)] + ) - endpoint_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid + raw_endpoint_name = endpoint.name + endpoint_topology_uuid, endpoint_device_uuid, endpoint_uuid = endpoint_get_uuid( + endpoint.endpoint_id, endpoint_name=raw_endpoint_name, allow_random=True) kpi_sample_types = [grpc_to_enum__kpi_sample_type(kst) for kst in endpoint.kpi_sample_types] endpoints_data.append({ - 'context_uuid' : endpoint_context_uuid, - 'topology_uuid' : endpoint_topology_uuid, + 'endpoint_uuid' : endpoint_uuid, 'device_uuid' : endpoint_device_uuid, - 'endpoint_uuid' : endpoint.endpoint_id.endpoint_uuid.uuid, + 'topology_uuid' : endpoint_topology_uuid, + 'name' : raw_endpoint_name, 'endpoint_type' : endpoint.endpoint_type, 'kpi_sample_types': kpi_sample_types, }) - if len(endpoint_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - topology_key = (endpoint_context_uuid, endpoint_topology_uuid) - if topology_key not in topology_keys: - related_topologies.append({ - 'context_uuid': endpoint_context_uuid, - 'topology_uuid': endpoint_topology_uuid, - 'device_uuid': endpoint_device_uuid, - }) - topology_keys.add(topology_key) + if endpoint_topology_uuid not in topology_uuids: + related_topologies.append({ + 'topology_uuid': endpoint_topology_uuid, + 'device_uuid' : endpoint_device_uuid, + }) + topology_uuids.add(endpoint_topology_uuid) - config_rules : List[Dict] = list() - for position,config_rule in enumerate(request.device_config.config_rules): - str_kind = config_rule.WhichOneof('config_rule') - config_rules.append({ - 'device_uuid': device_uuid, - 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member - 'action' : grpc_to_enum__config_action(config_rule.action), - 'position' : position, - 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), - }) + #config_rules : List[Dict] = list() + #for position,config_rule in enumerate(request.device_config.config_rules): + # str_kind = config_rule.WhichOneof('config_rule') + # config_rules.append({ + # 'device_uuid': device_uuid, + # 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member + # 'action' : grpc_to_enum__config_action(config_rule.action), + # 'position' : position, + # 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), + # }) + + device_data = [{ + 'device_uuid' : device_uuid, + 'device_name' : device_name, + 'device_type' : device_type, + 'device_operational_status': oper_status, + 'device_drivers' : device_drivers, + }] def callback(session : Session) -> None: - obj : Optional[DeviceModel] = session.query(DeviceModel).with_for_update()\ - .filter_by(device_uuid=device_uuid).one_or_none() - is_update = obj is not None - if is_update: - obj.device_name = device_name - obj.device_type = device_type - obj.device_operational_status = oper_status - obj.device_drivers = device_drivers - session.merge(obj) - else: - session.add(DeviceModel( - device_uuid=device_uuid, device_name=device_name, device_type=device_type, - device_operational_status=oper_status, device_drivers=device_drivers, created_at=time.time())) - obj : Optional[DeviceModel] = session.query(DeviceModel)\ - .filter_by(device_uuid=device_uuid).one_or_none() + stmt = insert(DeviceModel).values(device_data) + stmt = stmt.on_conflict_do_update( + index_elements=[DeviceModel.device_uuid], + set_=dict( + device_name = stmt.excluded.device_name, + device_type = stmt.excluded.device_type, + device_operational_status = stmt.excluded.device_operational_status, + device_drivers = stmt.excluded.device_drivers, + ) + ) + session.execute(stmt) stmt = insert(EndPointModel).values(endpoints_data) stmt = stmt.on_conflict_do_update( - index_elements=[ - EndPointModel.context_uuid, EndPointModel.topology_uuid, EndPointModel.device_uuid, - EndPointModel.endpoint_uuid - ], + index_elements=[EndPointModel.endpoint_uuid], set_=dict( - endpoint_type = stmt.excluded.endpoint_type, + name = stmt.excluded.name, + endpoint_type = stmt.excluded.endpoint_type, kpi_sample_types = stmt.excluded.kpi_sample_types, ) ) session.execute(stmt) session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing( - index_elements=[ - TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, - TopologyDeviceModel.device_uuid - ] + index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] )) - session.execute(delete(ConfigRuleModel).where(ConfigRuleModel.device_uuid == device_uuid)) - session.execute(insert(ConfigRuleModel).values(config_rules)) + #session.execute(delete(ConfigRuleModel).where(ConfigRuleModel.device_uuid == device_uuid)) + #session.execute(insert(ConfigRuleModel).values(config_rules)) run_transaction(sessionmaker(bind=db_engine), callback) - return False # TODO: improve and check if created/updated + updated = False # TODO: improve and check if created/updated + return DeviceId(**json_device_id(device_uuid)),updated def device_delete(db_engine : Engine, request : DeviceId) -> bool: - device_uuid = request.device_uuid.uuid + device_uuid = device_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: - session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() + #session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() #db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() #session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() diff --git a/src/context/service/database/methods/Service.py b/src/context/service/database/methods/Service.py new file mode 100644 index 000000000..9f5e519df --- /dev/null +++ b/src/context/service/database/methods/Service.py @@ -0,0 +1,263 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional +from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceIdList, ServiceList +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from context.service.database.models.ServiceModel import ServiceModel + +def service_list_ids(db_engine : Engine, request : ContextId) -> ServiceIdList: + context_uuid = request.context_uuid.uuid + def callback(session : Session) -> List[Dict]: + obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.service)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return ServiceIdList(service_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + +def service_list_objs(db_engine : Engine, request : ContextId) -> ServiceList: + context_uuid = request.context_uuid.uuid + def callback(session : Session) -> List[Dict]: + obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.service)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return ServiceList(services=run_transaction(sessionmaker(bind=db_engine), callback)) + +def service_get(db_engine : Engine, request : ServiceId) -> Service: + context_uuid = request.context_id.context_uuid.uuid + service_uuid = request.service_uuid.uuid + + def callback(session : Session) -> Optional[Dict]: + obj : Optional[ServiceModel] = session.query(ServiceModel)\ + .filter_by(context_uuid=context_uuid, service_uuid=service_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: + obj_uuid = '{:s}/{:s}'.format(context_uuid, service_uuid) + raise NotFoundException('Service', obj_uuid) + return Service(**obj) + +def service_set(db_engine : Engine, request : Service) -> bool: + context_uuid = request.service_id.context_id.context_uuid.uuid + service_uuid = request.service_id.service_uuid.uuid + service_name = request.name + + for i,endpoint_id in enumerate(request.service_endpoint_ids): + endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + if len(endpoint_context_uuid) > 0 and context_uuid != endpoint_context_uuid: + raise InvalidArgumentException( + 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), + endpoint_context_uuid, + ['should be == {:s}({:s})'.format('request.service_id.context_id.context_uuid.uuid', context_uuid)]) + + + def callback(session : Session) -> None: + service_data = [{ + 'context_uuid' : context_uuid, + 'service_uuid': service_uuid, + 'service_name': service_name, + 'created_at' : time.time(), + }] + stmt = insert(ServiceModel).values(service_data) + stmt = stmt.on_conflict_do_update( + index_elements=[ServiceModel.context_uuid, ServiceModel.service_uuid], + set_=dict(service_name = stmt.excluded.service_name) + ) + session.execute(stmt) + + run_transaction(sessionmaker(bind=db_engine), callback) + return False # TODO: improve and check if created/updated + + +# # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) +# db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() +# # str_service_key = key_to_str([context_uuid, service_uuid]) +# constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints) +# db_constraints = constraints_result[0][0] +# +# config_rules = grpc_config_rules_to_raw(request.service_config.config_rules) +# running_config_result = update_config(self.database, str_service_key, 'running', config_rules) +# db_running_config = running_config_result[0][0] +# +# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { +# 'context_fk' : db_context, +# 'service_uuid' : service_uuid, +# 'service_type' : grpc_to_enum__service_type(request.service_type), +# 'service_constraints_fk': db_constraints, +# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), +# 'service_config_fk' : db_running_config, +# }) +# db_service, updated = result +# +# for i,endpoint_id in enumerate(request.service_endpoint_ids): +# endpoint_uuid = endpoint_id.endpoint_uuid.uuid +# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid +# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# +# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) +# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') +# +# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) +# +# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') +# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( +# self.database, ServiceEndPointModel, str_service_endpoint_key, { +# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) +# #db_service_endpoint, service_endpoint_created = result +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_service_id = db_service.dump_id() +# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) +# return ServiceId(**dict_service_id) +# context_uuid = request.service_id.context_id.context_uuid.uuid +# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) +# +# for i,endpoint_id in enumerate(request.service_endpoint_ids): +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: +# raise InvalidArgumentException( +# 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), +# endpoint_topology_context_uuid, +# ['should be == {:s}({:s})'.format( +# 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) +# +# service_uuid = request.service_id.service_uuid.uuid +# str_service_key = key_to_str([context_uuid, service_uuid]) +# +# constraints_result = set_constraints( +# self.database, str_service_key, 'service', request.service_constraints) +# db_constraints = constraints_result[0][0] +# +# running_config_rules = update_config( +# self.database, str_service_key, 'service', request.service_config.config_rules) +# db_running_config = running_config_rules[0][0] +# +# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { +# 'context_fk' : db_context, +# 'service_uuid' : service_uuid, +# 'service_type' : grpc_to_enum__service_type(request.service_type), +# 'service_constraints_fk': db_constraints, +# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), +# 'service_config_fk' : db_running_config, +# }) +# db_service, updated = result +# +# for i,endpoint_id in enumerate(request.service_endpoint_ids): +# endpoint_uuid = endpoint_id.endpoint_uuid.uuid +# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid +# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid +# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid +# +# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) +# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: +# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) +# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') +# +# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) +# +# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') +# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( +# self.database, ServiceEndPointModel, str_service_endpoint_key, { +# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) +# #db_service_endpoint, service_endpoint_created = result +# +# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE +# dict_service_id = db_service.dump_id() +# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) +# return ServiceId(**dict_service_id) + + +# def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int +# ) -> Tuple[Union_ConstraintModel, bool]: +# with self.session() as session: +# +# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) +# +# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) +# if parser is None: +# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( +# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) +# +# # create specific constraint +# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) +# str_constraint_id = str(uuid.uuid4()) +# LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) +# # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) +# # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') +# +# # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( +# # database, constraint_class, str_constraint_key, constraint_data) +# constraint_data[constraint_class.main_pk_name()] = str_constraint_id +# db_new_constraint = constraint_class(**constraint_data) +# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) +# db_specific_constraint, updated = result +# +# # create generic constraint +# # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) +# constraint_data = { +# 'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind +# } +# +# db_new_constraint = ConstraintModel(**constraint_data) +# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) +# db_constraint, updated = result +# +# return db_constraint, updated +# +# def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints +# ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: +# with self.session() as session: +# # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') +# # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) +# result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() +# created = None +# if result: +# created = True +# session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() +# db_constraints = ConstraintsModel(constraints_uuid=service_uuid) +# session.add(db_constraints) +# +# db_objects = [(db_constraints, created)] +# +# for position,grpc_constraint in enumerate(grpc_constraints): +# result : Tuple[ConstraintModel, bool] = self.set_constraint( +# db_constraints, grpc_constraint, position) +# db_constraint, updated = result +# db_objects.append((db_constraint, updated)) +# +# return db_objects + +def service_delete(db_engine : Engine, request : ServiceId) -> bool: + context_uuid = request.context_id.context_uuid.uuid + service_uuid = request.service_uuid.uuid + def callback(session : Session) -> bool: + num_deleted = session.query(ServiceModel)\ + .filter_by(context_uuid=context_uuid, service_uuid=service_uuid).delete() + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) + + # def delete(self) -> None: + # from .RelationModels import ServiceEndPointModel + # for db_service_endpoint_pk,_ in self.references(ServiceEndPointModel): + # ServiceEndPointModel(self.database, db_service_endpoint_pk).delete() + # super().delete() + # ConfigModel(self.database, self.service_config_fk).delete() + # ConstraintsModel(self.database, self.service_constraints_fk).delete() diff --git a/src/context/service/database/methods/Topology.py b/src/context/service/database/methods/Topology.py index f9449e0c3..1abbc5562 100644 --- a/src/context/service/database/methods/Topology.py +++ b/src/context/service/database/methods/Topology.py @@ -12,19 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set from common.proto.context_pb2 import ContextId, Topology, TopologyId, TopologyIdList, TopologyList -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException -from context.service.database.models.RelationModels import TopologyDeviceModel +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException, NotFoundException +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +#from context.service.database.models.RelationModels import TopologyDeviceModel, TopologyLinkModel from context.service.database.models.TopologyModel import TopologyModel +from .uuids.Context import context_get_uuid +from .uuids.Topology import topology_get_uuid def topology_list_ids(db_engine : Engine, request : ContextId) -> TopologyIdList: - context_uuid = request.context_uuid.uuid + context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() @@ -32,7 +35,7 @@ def topology_list_ids(db_engine : Engine, request : ContextId) -> TopologyIdList return TopologyIdList(topology_ids=run_transaction(sessionmaker(bind=db_engine), callback)) def topology_list_objs(db_engine : Engine, request : ContextId) -> TopologyList: - context_uuid = request.context_uuid.uuid + context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() @@ -40,84 +43,74 @@ def topology_list_objs(db_engine : Engine, request : ContextId) -> TopologyList: return TopologyList(topologies=run_transaction(sessionmaker(bind=db_engine), callback)) def topology_get(db_engine : Engine, request : TopologyId) -> Topology: - context_uuid = request.context_id.context_uuid.uuid - topology_uuid = request.topology_uuid.uuid - + _,topology_uuid = topology_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[TopologyModel] = session.query(TopologyModel)\ - .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).one_or_none() + .filter_by(topology_uuid=topology_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) if obj is None: - obj_uuid = '{:s}/{:s}'.format(context_uuid, topology_uuid) - raise NotFoundException('Topology', obj_uuid) + context_uuid = context_get_uuid(request.context_id, allow_random=False) + raw_topology_uuid = '{:s}/{:s}'.format(request.context_id.context_uuid.uuid, request.topology_uuid.uuid) + raise NotFoundException('Topology', raw_topology_uuid, extra_details=[ + 'context_uuid generated was: {:s}'.format(context_uuid), + 'topology_uuid generated was: {:s}'.format(topology_uuid), + ]) return Topology(**obj) def topology_set(db_engine : Engine, request : Topology) -> bool: - context_uuid = request.topology_id.context_id.context_uuid.uuid - topology_uuid = request.topology_id.topology_uuid.uuid topology_name = request.name + if len(topology_name) == 0: topology_name = request.topology_id.topology_uuid.uuid + context_uuid,topology_uuid = topology_get_uuid(request.topology_id, topology_name=topology_name, allow_random=True) + + #device_uuids : Set[str] = set() + #devices_to_add : List[Dict] = list() + #for device_id in request.device_ids: + # device_uuid = device_id.device_uuid.uuid + # if device_uuid in device_uuids: continue + # devices_to_add.append({'topology_uuid': topology_uuid, 'device_uuid': device_uuid}) + # device_uuids.add(device_uuid) - device_uuids : Set[str] = set() - devices_to_add : List[Dict] = list() - for device_id in request.device_ids: - device_uuid = device_id.device_uuid.uuid - if device_uuid in device_uuids: continue - devices_to_add.append({ - 'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'device_uuid': device_uuid - }) - device_uuids.add(device_uuid) + #link_uuids : Set[str] = set() + #links_to_add : List[Dict] = list() + #for link_id in request.link_ids: + # link_uuid = link_id.link_uuid.uuid + # if link_uuid in link_uuids: continue + # links_to_add.append({'topology_uuid': topology_uuid, 'link_uuid': link_uuid}) + # link_uuids.add(link_uuid) - link_uuids : Set[str] = set() - links_to_add : List[Dict] = list() - for link_id in request.link_ids: - link_uuid = link_id.link_uuid.uuid - if link_uuid in link_uuids: continue - links_to_add.append({ - 'context_uuid': context_uuid, 'topology_uuid': topology_uuid, 'link_uuid': link_uuid - }) - link_uuids.add(link_uuid) + topology_data = [{ + 'context_uuid' : context_uuid, + 'topology_uuid': topology_uuid, + 'topology_name': topology_name, + }] def callback(session : Session) -> None: - topology_data = [{ - 'context_uuid' : context_uuid, - 'topology_uuid': topology_uuid, - 'topology_name': topology_name, - 'created_at' : time.time(), - }] stmt = insert(TopologyModel).values(topology_data) stmt = stmt.on_conflict_do_update( - index_elements=[TopologyModel.context_uuid, TopologyModel.topology_uuid], + index_elements=[TopologyModel.topology_uuid], set_=dict(topology_name = stmt.excluded.topology_name) ) session.execute(stmt) - if len(devices_to_add) > 0: - session.execute(insert(TopologyDeviceModel).values(devices_to_add).on_conflict_do_nothing( - index_elements=[ - TopologyDeviceModel.context_uuid, TopologyDeviceModel.topology_uuid, - TopologyDeviceModel.device_uuid - ] - )) + #if len(devices_to_add) > 0: + # session.execute(insert(TopologyDeviceModel).values(devices_to_add).on_conflict_do_nothing( + # index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] + # )) - #if len(link_to_add) > 0: + #if len(links_to_add) > 0: # session.execute(insert(TopologyLinkModel).values(links_to_add).on_conflict_do_nothing( - # index_elements=[ - # TopologyLinkModel.context_uuid, TopologyLinkModel.topology_uuid, - # TopologyLinkModel.link_uuid - # ] + # index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid] # )) run_transaction(sessionmaker(bind=db_engine), callback) - return False # TODO: improve and check if created/updated + updated = False # TODO: improve and check if created/updated + return TopologyId(**json_topology_id(topology_uuid, json_context_id(context_uuid))),updated def topology_delete(db_engine : Engine, request : TopologyId) -> bool: - context_uuid = request.context_id.context_uuid.uuid - topology_uuid = request.topology_uuid.uuid - + _,topology_uuid = topology_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(TopologyModel)\ - .filter_by(context_uuid=context_uuid, topology_uuid=topology_uuid).delete() + .filter_by(topology_uuid=topology_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/methods/uuids/Context.py b/src/context/service/database/methods/uuids/Context.py new file mode 100644 index 000000000..753f80e9c --- /dev/null +++ b/src/context/service/database/methods/uuids/Context.py @@ -0,0 +1,33 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.context_pb2 import ContextId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random + +def context_get_uuid( + context_id : ContextId, context_name : str = '', allow_random : bool = False +) -> str: + context_uuid = context_id.context_uuid.uuid + + if len(context_uuid) > 0: + return get_uuid_from_string(context_uuid) + if len(context_name) > 0: + return get_uuid_from_string(context_name) + if allow_random: return get_uuid_random() + + raise InvalidArgumentsException([ + ('context_id.context_uuid.uuid', context_uuid), + ('name', context_name), + ], extra_details=['At least one is required to produce a Context UUID']) diff --git a/src/context/service/database/methods/uuids/Device.py b/src/context/service/database/methods/uuids/Device.py new file mode 100644 index 000000000..c1b66759b --- /dev/null +++ b/src/context/service/database/methods/uuids/Device.py @@ -0,0 +1,33 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.context_pb2 import DeviceId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random + +def device_get_uuid( + device_id : DeviceId, device_name : str = '', allow_random : bool = False +) -> str: + device_uuid = device_id.device_uuid.uuid + + if len(device_uuid) > 0: + return get_uuid_from_string(device_uuid) + if len(device_name) > 0: + return get_uuid_from_string(device_name) + if allow_random: return get_uuid_random() + + raise InvalidArgumentsException([ + ('device_id.device_uuid.uuid', device_uuid), + ('name', device_name), + ], extra_details=['At least one is required to produce a Device UUID']) diff --git a/src/context/service/database/methods/uuids/EndPoint.py b/src/context/service/database/methods/uuids/EndPoint.py new file mode 100644 index 000000000..7afb87184 --- /dev/null +++ b/src/context/service/database/methods/uuids/EndPoint.py @@ -0,0 +1,41 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple +from common.proto.context_pb2 import EndPointId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random +from .Device import device_get_uuid +from .Topology import topology_get_uuid + +def endpoint_get_uuid( + endpoint_id : EndPointId, endpoint_name : str = '', allow_random : bool = False +) -> Tuple[str, str, str]: + device_uuid = device_get_uuid(endpoint_id.device_id, allow_random=False) + _,topology_uuid = topology_get_uuid(endpoint_id.topology_id, allow_random=False) + raw_endpoint_uuid = endpoint_id.endpoint_uuid.uuid + + if len(raw_endpoint_uuid) > 0: + prefix_for_name = '{:s}/{:s}'.format(topology_uuid, device_uuid) + return topology_uuid, device_uuid, get_uuid_from_string(raw_endpoint_uuid, prefix_for_name=prefix_for_name) + if len(endpoint_name) > 0: + prefix_for_name = '{:s}/{:s}'.format(topology_uuid, device_uuid) + return topology_uuid, device_uuid, get_uuid_from_string(endpoint_name, prefix_for_name=prefix_for_name) + if allow_random: + return topology_uuid, device_uuid, get_uuid_random() + + raise InvalidArgumentsException([ + ('endpoint_id.endpoint_uuid.uuid', raw_endpoint_uuid), + ('name', endpoint_name), + ], extra_details=['At least one is required to produce a EndPoint UUID']) diff --git a/src/context/service/database/methods/uuids/Link.py b/src/context/service/database/methods/uuids/Link.py new file mode 100644 index 000000000..d1ae4c21f --- /dev/null +++ b/src/context/service/database/methods/uuids/Link.py @@ -0,0 +1,33 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.context_pb2 import LinkId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random + +def link_get_uuid( + link_id : LinkId, link_name : str = '', allow_random : bool = False +) -> str: + link_uuid = link_id.link_uuid.uuid + + if len(link_uuid) > 0: + return get_uuid_from_string(link_uuid) + if len(link_name) > 0: + return get_uuid_from_string(link_name) + if allow_random: return get_uuid_random() + + raise InvalidArgumentsException([ + ('link_id.link_uuid.uuid', link_uuid), + ('name', link_name), + ], extra_details=['At least one is required to produce a Link UUID']) diff --git a/src/context/service/database/methods/uuids/Topology.py b/src/context/service/database/methods/uuids/Topology.py new file mode 100644 index 000000000..c3c9175d8 --- /dev/null +++ b/src/context/service/database/methods/uuids/Topology.py @@ -0,0 +1,37 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple +from common.proto.context_pb2 import TopologyId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random +from .Context import context_get_uuid + +def topology_get_uuid( + topology_id : TopologyId, topology_name : str = '', allow_random : bool = False +) -> Tuple[str, str]: + context_uuid = context_get_uuid(topology_id.context_id, allow_random=False) + raw_topology_uuid = topology_id.topology_uuid.uuid + + if len(raw_topology_uuid) > 0: + return context_uuid, get_uuid_from_string(raw_topology_uuid, prefix_for_name=context_uuid) + if len(topology_name) > 0: + return context_uuid, get_uuid_from_string(topology_name, prefix_for_name=context_uuid) + if allow_random: + return context_uuid, get_uuid_random() + + raise InvalidArgumentsException([ + ('topology_id.topology_uuid.uuid', raw_topology_uuid), + ('name', topology_name), + ], extra_details=['At least one is required to produce a Topology UUID']) diff --git a/src/context/service/database/methods/uuids/_Builder.py b/src/context/service/database/methods/uuids/_Builder.py new file mode 100644 index 000000000..55384433b --- /dev/null +++ b/src/context/service/database/methods/uuids/_Builder.py @@ -0,0 +1,44 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Union +from uuid import UUID, uuid4, uuid5 + +# Generate a UUIDv5-like from the SHA-1 of "TFS" and no namespace to be used as the NAMESPACE for all +# the context UUIDs generated. For efficiency purposes, the UUID is hardcoded; however, it is produced +# using the following code: +# from hashlib import sha1 +# from uuid import UUID +# hash = sha1(bytes('TFS', 'utf-8')).digest() +# NAMESPACE_TFS = UUID(bytes=hash[:16], version=5) +NAMESPACE_TFS = UUID('200e3a1f-2223-534f-a100-758e29c37f40') + +def get_uuid_from_string(str_uuid_or_name : Union[str, UUID], prefix_for_name : Optional[str] = None) -> str: + # if UUID given, assume it is already a valid UUID + if isinstance(str_uuid_or_name, UUID): return str_uuid_or_name + if not isinstance(str_uuid_or_name, str): + MSG = 'Parameter({:s}) cannot be used to produce a UUID' + raise Exception(MSG.format(str(repr(str_uuid_or_name)))) + try: + # try to parse as UUID + return str(UUID(str_uuid_or_name)) + except: # pylint: disable=bare-except + # produce a UUID within TFS namespace from parameter + if prefix_for_name is not None: + str_uuid_or_name = '{:s}/{:s}'.format(prefix_for_name, str_uuid_or_name) + return str(uuid5(NAMESPACE_TFS, str_uuid_or_name)) + +def get_uuid_random() -> str: + # Generate random UUID. No need to use namespace since "namespace + random = random". + return str(uuid4()) diff --git a/src/context/service/database/methods/uuids/__init__.py b/src/context/service/database/methods/uuids/__init__.py new file mode 100644 index 000000000..9953c8205 --- /dev/null +++ b/src/context/service/database/methods/uuids/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index d5a37eed2..a229f475d 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -13,32 +13,53 @@ # limitations under the License. import enum, json -from sqlalchemy import Column, ForeignKey, INTEGER, CheckConstraint, Enum, String, text +from sqlalchemy import Column, INTEGER, CheckConstraint, Enum, ForeignKeyConstraint, String, UniqueConstraint, text from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict from .enums.ConfigAction import ORM_ConfigActionEnum from ._Base import _Base -# enum values should match name of field in ConfigRuleModel +# Enum values should match name of field in ConfigRuleModel class ConfigRuleKindEnum(enum.Enum): CUSTOM = 'custom' ACL = 'acl' class ConfigRuleModel(_Base): __tablename__ = 'config_rule' - device_uuid = Column(UUID(as_uuid=False), ForeignKey('device.device_uuid', ondelete='CASCADE'), primary_key=True) - rule_uuid = Column(UUID(as_uuid=False), primary_key=True, server_default=text('uuid_generate_v4()')) - kind = Column(Enum(ConfigRuleKindEnum)) - action = Column(Enum(ORM_ConfigActionEnum)) - position = Column(INTEGER, nullable=False) - data = Column(String, nullable=False) + + config_rule_uuid = Column(UUID(as_uuid=False), primary_key=True, server_default=text('uuid_generate_v4()')) + device_uuid = Column(UUID(as_uuid=False)) # for device config rules + context_uuid = Column(UUID(as_uuid=False)) # for service/slice config rules + service_uuid = Column(UUID(as_uuid=False)) # for service config rules + #slice_uuid = Column(UUID(as_uuid=False)) # for slice config rules + kind = Column(Enum(ConfigRuleKindEnum)) + action = Column(Enum(ORM_ConfigActionEnum)) + position = Column(INTEGER, nullable=False) + data = Column(String, nullable=False) __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), + UniqueConstraint('device_uuid', 'position', name='unique_per_device'), + UniqueConstraint('context_uuid', 'service_uuid', 'position', name='unique_per_service'), + #UniqueConstraint('context_uuid', 'slice_uuid', 'position', name='unique_per_slice'), + ForeignKeyConstraint( + ['device_uuid'], + ['device.device_uuid'], + ondelete='CASCADE'), + ForeignKeyConstraint( + ['context_uuid', 'service_uuid'], + ['service.context_uuid', 'service.service_uuid'], + ondelete='CASCADE'), + #ForeignKeyConstraint( + # ['context_uuid', 'slice_uuid'], + # ['slice.context_uuid', 'slice.slice_uuid'], + # ondelete='CASCADE'), ) device = relationship('DeviceModel', back_populates='config_rules') + service = relationship('ServiceModel', back_populates='config_rules') + #slice = relationship('SliceModel', back_populates='config_rules') def dump(self) -> Dict: return {self.kind.value: json.loads(self.data)} diff --git a/src/context/service/database/models/ContextModel.py b/src/context/service/database/models/ContextModel.py index a5ddeb596..84039dea9 100644 --- a/src/context/service/database/models/ContextModel.py +++ b/src/context/service/database/models/ContextModel.py @@ -12,28 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List -from sqlalchemy import Column, Float, String +from typing import Dict +from sqlalchemy import Column, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from ._Base import _Base class ContextModel(_Base): __tablename__ = 'context' + context_uuid = Column(UUID(as_uuid=False), primary_key=True) - context_name = Column(String(), nullable=False) - created_at = Column(Float) + context_name = Column(String, nullable=False) topologies = relationship('TopologyModel', back_populates='context') - #services = relationship('ServiceModel', back_populates='context') - #slices = relationship('SliceModel', back_populates='context') + #services = relationship('ServiceModel', back_populates='context') + #slices = relationship('SliceModel', back_populates='context') def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} - def dump_topology_ids(self) -> List[Dict]: - return - def dump(self) -> Dict: return { 'context_id' : self.dump_id(), diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py index fb5853482..33e780411 100644 --- a/src/context/service/database/models/DeviceModel.py +++ b/src/context/service/database/models/DeviceModel.py @@ -14,12 +14,12 @@ import operator from typing import Dict -from sqlalchemy import Column, Float, String, Enum +from sqlalchemy import Column, String, Enum from sqlalchemy.dialects.postgresql import UUID, ARRAY from sqlalchemy.orm import relationship -from ._Base import _Base from .enums.DeviceDriver import ORM_DeviceDriverEnum from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum +from ._Base import _Base class DeviceModel(_Base): __tablename__ = 'device' @@ -28,10 +28,9 @@ class DeviceModel(_Base): device_type = Column(String, nullable=False) device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum)) device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) - created_at = Column(Float) topology_devices = relationship('TopologyDeviceModel', back_populates='device') - config_rules = relationship('ConfigRuleModel', passive_deletes=True, back_populates='device', lazy='joined') + #config_rules = relationship('ConfigRuleModel', passive_deletes=True, back_populates='device', lazy='joined') endpoints = relationship('EndPointModel', passive_deletes=True, back_populates='device', lazy='joined') def dump_id(self) -> Dict: @@ -45,8 +44,11 @@ class DeviceModel(_Base): 'device_operational_status': self.device_operational_status.value, 'device_drivers' : [driver.value for driver in self.device_drivers], 'device_config' : {'config_rules': [ - config_rule.dump() - for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) + #config_rule.dump() + #for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) ]}, - 'device_endpoints' : [endpoint.dump() for endpoint in self.endpoints], + 'device_endpoints' : [ + endpoint.dump() + for endpoint in self.endpoints + ], } diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py index b7e4c9fe3..804b68847 100644 --- a/src/context/service/database/models/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Dict -from sqlalchemy import Column, String, Enum, ForeignKeyConstraint +from sqlalchemy import Column, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship from .enums.KpiSampleType import ORM_KpiSampleTypeEnum @@ -21,32 +21,23 @@ from ._Base import _Base class EndPointModel(_Base): __tablename__ = 'endpoint' - context_uuid = Column(UUID(as_uuid=False), primary_key=True) - topology_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), primary_key=True) - endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) - endpoint_type = Column(String) - kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1)) - __table_args__ = ( - ForeignKeyConstraint( - ['context_uuid', 'topology_uuid'], - ['topology.context_uuid', 'topology.topology_uuid'], - ondelete='CASCADE'), - ForeignKeyConstraint( - ['device_uuid'], - ['device.device_uuid'], - ondelete='CASCADE'), - ) + endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_uuid = Column(UUID(as_uuid=False), ForeignKey('device.device_uuid', ondelete='CASCADE')) + topology_uuid = Column(UUID(as_uuid=False), ForeignKey('topology.topology_uuid', ondelete='RESTRICT')) + name = Column(String) + endpoint_type = Column(String) + kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1)) - topology = relationship('TopologyModel', back_populates='endpoints') - device = relationship('DeviceModel', back_populates='endpoints') - link_endpoints = relationship('LinkEndPointModel', back_populates='endpoint') + device = relationship('DeviceModel', back_populates='endpoints') + topology = relationship('TopologyModel') + #link_endpoints = relationship('LinkEndPointModel', back_populates='endpoint' ) + #service_endpoints = relationship('ServiceEndPointModel', back_populates='endpoint' ) def dump_id(self) -> Dict: result = { - 'topology_id': self.topology.dump_id(), - 'device_id': self.device.dump_id(), + 'topology_id' : self.topology.dump_id(), + 'device_id' : self.device.dump_id(), 'endpoint_uuid': {'uuid': self.endpoint_uuid}, } return result @@ -54,34 +45,7 @@ class EndPointModel(_Base): def dump(self) -> Dict: return { 'endpoint_id' : self.dump_id(), + 'name' : self.name, 'endpoint_type' : self.endpoint_type, 'kpi_sample_types': [kst.value for kst in self.kpi_sample_types], } - -# def get_endpoint( -# database : Database, grpc_endpoint_id : EndPointId, -# validate_topology_exists : bool = True, validate_device_in_topology : bool = True -# ) -> Tuple[str, EndPointModel]: -# endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid -# endpoint_device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid -# endpoint_topology_uuid = grpc_endpoint_id.topology_id.topology_uuid.uuid -# endpoint_topology_context_uuid = grpc_endpoint_id.topology_id.context_id.context_uuid.uuid -# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) -# -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# # check topology exists -# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) -# if validate_topology_exists: -# from .TopologyModel import TopologyModel -# get_object(database, TopologyModel, str_topology_key) -# -# # check device is in topology -# str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--') -# if validate_device_in_topology: -# from .RelationModels import TopologyDeviceModel -# get_object(database, TopologyDeviceModel, str_topology_device_key) -# -# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') -# -# db_endpoint : EndPointModel = get_object(database, EndPointModel, str_endpoint_key) -# return str_endpoint_key, db_endpoint diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index df173f527..eec871e77 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -20,6 +20,7 @@ from ._Base import _Base class LinkModel(_Base): __tablename__ = 'link' + link_uuid = Column(UUID(as_uuid=False), primary_key=True) link_name = Column(String, nullable=False) created_at = Column(Float) diff --git a/src/context/service/database/models/RelationModels.py b/src/context/service/database/models/RelationModels.py index 6cc4ff86c..38d93bee7 100644 --- a/src/context/service/database/models/RelationModels.py +++ b/src/context/service/database/models/RelationModels.py @@ -12,49 +12,66 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging from sqlalchemy import Column, ForeignKey, ForeignKeyConstraint from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship -from context.service.database.models._Base import _Base - -LOGGER = logging.getLogger(__name__) +from ._Base import _Base # class ConnectionSubServiceModel(Model): # pk = PrimaryKeyField() # connection_fk = ForeignKeyField(ConnectionModel) # sub_service_fk = ForeignKeyField(ServiceModel) - -# link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) -# endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"), primary_key=True) - -class LinkEndPointModel(_Base): - __tablename__ = 'link_endpoint' - link_uuid = Column(UUID(as_uuid=False), primary_key=True) - context_uuid = Column(UUID(as_uuid=False), primary_key=True) - topology_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), primary_key=True) - endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) - - link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') - endpoint = relationship('EndPointModel', back_populates='link_endpoints', lazy='joined') - - __table_args__ = ( - ForeignKeyConstraint( - ['link_uuid'], - ['link.link_uuid'], - ondelete='CASCADE'), - ForeignKeyConstraint( - ['context_uuid', 'topology_uuid', 'device_uuid', 'endpoint_uuid'], - ['endpoint.context_uuid', 'endpoint.topology_uuid', 'endpoint.device_uuid', 'endpoint.endpoint_uuid'], - ondelete='CASCADE'), - ) - -# class ServiceEndPointModel(Model): -# pk = PrimaryKeyField() -# service_fk = ForeignKeyField(ServiceModel) -# endpoint_fk = ForeignKeyField(EndPointModel) +#class LinkEndPointModel(_Base): +# __tablename__ = 'link_endpoint' +# +# link_uuid = Column(UUID(as_uuid=False), primary_key=True) +# context_uuid = Column(UUID(as_uuid=False), primary_key=True) +# topology_uuid = Column(UUID(as_uuid=False), primary_key=True) +# device_uuid = Column(UUID(as_uuid=False), primary_key=True) +# endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) +# +# link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') +# endpoint = relationship('EndPointModel', back_populates='link_endpoints', lazy='joined') +# +# __table_args__ = ( +# ForeignKeyConstraint( +# ['link_uuid'], +# ['link.link_uuid'], +# ondelete='CASCADE'), +# ForeignKeyConstraint( +# ['context_uuid', 'topology_uuid', 'device_uuid', 'endpoint_uuid'], +# ['endpoint.context_uuid', 'endpoint.topology_uuid', 'endpoint.device_uuid', 'endpoint.endpoint_uuid'], +# ondelete='CASCADE'), +# ) + +#class ServiceEndPointModel(_Base): +# __tablename__ = 'service_endpoint' +# +# context_uuid = Column(UUID(as_uuid=False), primary_key=True) +# service_uuid = Column(UUID(as_uuid=False), primary_key=True) +# topology_uuid = Column(UUID(as_uuid=False), primary_key=True) +# device_uuid = Column(UUID(as_uuid=False), primary_key=True) +# endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) +# +# service = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined') +# endpoint = relationship('EndPointModel', back_populates='service_endpoints', lazy='joined') +# writer = relationship( +# "Writer", +# primaryjoin="and_(Writer.id == foreign(Article.writer_id), " +# "Writer.magazine_id == Article.magazine_id)", +# ) +# +# __table_args__ = ( +# ForeignKeyConstraint( +# ['context_uuid', 'service_uuid'], +# ['service.context_uuid', 'service.service_uuid'], +# ondelete='CASCADE'), +# ForeignKeyConstraint( +# ['context_uuid', 'topology_uuid', 'device_uuid', 'endpoint_uuid'], +# ['endpoint.context_uuid', 'endpoint.topology_uuid', 'endpoint.device_uuid', 'endpoint.endpoint_uuid'], +# ondelete='CASCADE'), +# ) # class SliceEndPointModel(Model): # pk = PrimaryKeyField() @@ -64,12 +81,7 @@ class LinkEndPointModel(_Base): # class SliceServiceModel(Model): # pk = PrimaryKeyField() # slice_fk = ForeignKeyField(SliceModel) -# service_fk = ForeignKeyField(ServiceMo# pylint: disable=abstract-method -# __tablename__ = 'LinkEndPoint' -# uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) -# link_uuid = Column(UUID(as_uuid=False), ForeignKey("Link.link_uuid")) -# endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) -#del) +# service_fk = ForeignKeyField(ServiceModel) # class SliceSubSliceModel(Model): # pk = PrimaryKeyField() @@ -78,40 +90,30 @@ class LinkEndPointModel(_Base): class TopologyDeviceModel(_Base): __tablename__ = 'topology_device' - context_uuid = Column(UUID(as_uuid=False), primary_key=True) - topology_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), primary_key=True) + + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), primary_key=True) topology = relationship('TopologyModel', back_populates='topology_devices', lazy='joined') device = relationship('DeviceModel', back_populates='topology_devices', lazy='joined') - __table_args__ = ( - ForeignKeyConstraint( - ['context_uuid', 'topology_uuid'], - ['topology.context_uuid', 'topology.topology_uuid'], - ondelete='CASCADE'), - ForeignKeyConstraint( - ['device_uuid'], - ['device.device_uuid'], - ondelete='CASCADE'), - ) - -class TopologyLinkModel(_Base): - __tablename__ = 'topology_link' - context_uuid = Column(UUID(as_uuid=False), primary_key=True) - topology_uuid = Column(UUID(as_uuid=False), primary_key=True) - link_uuid = Column(UUID(as_uuid=False), primary_key=True) - - topology = relationship('TopologyModel', back_populates='topology_links', lazy='joined') - link = relationship('LinkModel', back_populates='topology_links', lazy='joined') - - __table_args__ = ( - ForeignKeyConstraint( - ['context_uuid', 'topology_uuid'], - ['topology.context_uuid', 'topology.topology_uuid'], - ondelete='CASCADE'), - ForeignKeyConstraint( - ['link_uuid'], - ['link.link_uuid'], - ondelete='CASCADE'), - ) +#class TopologyLinkModel(_Base): +# __tablename__ = 'topology_link' +# +# context_uuid = Column(UUID(as_uuid=False), primary_key=True) +# topology_uuid = Column(UUID(as_uuid=False), primary_key=True) +# link_uuid = Column(UUID(as_uuid=False), primary_key=True) +# +# topology = relationship('TopologyModel', back_populates='topology_links', lazy='joined') +# link = relationship('LinkModel', back_populates='topology_links', lazy='joined') +# +# __table_args__ = ( +# ForeignKeyConstraint( +# ['context_uuid', 'topology_uuid'], +# ['topology.context_uuid', 'topology.topology_uuid'], +# ondelete='CASCADE'), +# ForeignKeyConstraint( +# ['link_uuid'], +# ['link.link_uuid'], +# ondelete='CASCADE'), +# ) diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py index c06baca32..ea4e89526 100644 --- a/src/context/service/database/models/ServiceModel.py +++ b/src/context/service/database/models/ServiceModel.py @@ -12,100 +12,52 @@ # See the License for the specific language governing permissions and # limitations under the License. -import functools, logging, operator -from sqlalchemy import Column, Enum, ForeignKey -from typing import Dict, List -from common.orm.HighLevel import get_related_objects -from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum -from .ConfigRuleModel import ConfigModel -from .ConstraintModel import ConstraintsModel -from .models.ContextModel import ContextModel -from .Tools import grpc_to_enum +import operator +from sqlalchemy import Column, Enum, Float, ForeignKey, String +from typing import Dict from sqlalchemy.dialects.postgresql import UUID -from context.service.database.models._Base import Base -import enum -LOGGER = logging.getLogger(__name__) - -class ORM_ServiceTypeEnum(enum.Enum): - UNKNOWN = ServiceTypeEnum.SERVICETYPE_UNKNOWN - L3NM = ServiceTypeEnum.SERVICETYPE_L3NM - L2NM = ServiceTypeEnum.SERVICETYPE_L2NM - TAPI_CONNECTIVITY_SERVICE = ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE - -grpc_to_enum__service_type = functools.partial( - grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum) - -class ORM_ServiceStatusEnum(enum.Enum): - UNDEFINED = ServiceStatusEnum.SERVICESTATUS_UNDEFINED - PLANNED = ServiceStatusEnum.SERVICESTATUS_PLANNED - ACTIVE = ServiceStatusEnum.SERVICESTATUS_ACTIVE - PENDING_REMOVAL = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL - -grpc_to_enum__service_status = functools.partial( - grpc_to_enum, ServiceStatusEnum, ORM_ServiceStatusEnum) - -class ServiceModel(Base): - __tablename__ = 'Service' - - # pk = PrimaryKeyField() - # context_fk = ForeignKeyField(ContextModel) - context_uuid = Column(UUID(as_uuid=False), ForeignKey("Context.context_uuid")) - # service_uuid = StringField(required=True, allow_empty=False) - service_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - # service_type = EnumeratedField(ORM_ServiceTypeEnum, required=True) - service_type = Column(Enum(ORM_ServiceTypeEnum, create_constraint=False, native_enum=False, allow_empty=False)) - # service_constraints_fk = ForeignKeyField(ConstraintsModel) - service_constraints = Column(UUID(as_uuid=False), ForeignKey("Constraints.constraints_uuid")) - # service_status = EnumeratedField(ORM_ServiceStatusEnum, required=True) - service_status = Column(Enum(ORM_ServiceStatusEnum, create_constraint=False, native_enum=False, allow_empty=False)) - # service_config_fk = ForeignKeyField(ConfigModel) - service_config = Column(UUID(as_uuid=False), ForeignKey("Config.config_uuid")) - - # def delete(self) -> None: - # #pylint: disable=import-outside-toplevel - # from .RelationModels import ServiceEndPointModel - # - # for db_service_endpoint_pk,_ in self.references(ServiceEndPointModel): - # ServiceEndPointModel(self.database, db_service_endpoint_pk).delete() - # - # super().delete() - # - # ConfigModel(self.database, self.service_config_fk).delete() - # ConstraintsModel(self.database, self.service_constraints_fk).delete() - - def main_pk_name(self): - return 'context_uuid' - +from sqlalchemy.orm import relationship +from .enums.ServiceStatus import ORM_ServiceStatusEnum +from .enums.ServiceType import ORM_ServiceTypeEnum +from ._Base import _Base + +class ServiceModel(_Base): + __tablename__ = 'service' + + context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid'), primary_key=True) + service_uuid = Column(UUID(as_uuid=False), primary_key=True) + service_name = Column(String, nullable=False) + service_type = Column(Enum(ORM_ServiceTypeEnum)) + service_status = Column(Enum(ORM_ServiceStatusEnum)) + created_at = Column(Float) + + context = relationship('ContextModel', back_populates='services') + service_endpoints = relationship('ServiceEndPointModel', back_populates='service') #, lazy='joined') + #constraints = relationship('ConstraintModel', passive_deletes=True, back_populates='service', lazy='joined') + config_rules = relationship('ConfigRuleModel', passive_deletes=True, back_populates='service', lazy='joined') def dump_id(self) -> Dict: - context_id = ContextModel(self.database, self.context_fk).dump_id() return { - 'context_id': context_id, + 'context_id': self.context.dump_id(), 'service_uuid': {'uuid': self.service_uuid}, } - # def dump_endpoint_ids(self, endpoints) -> List[Dict]: - # from .RelationModels import ServiceEndPointModel # pylint: disable=import-outside-toplevel - # db_endpoints = get_related_objects(self, ServiceEndPointModel, 'endpoint_fk') - # return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))] - - def dump_constraints(self) -> List[Dict]: - return ConstraintsModel(self.database, self.service_constraints_fk).dump() - - def dump_config(self) -> Dict: - return ConfigModel(self.database, self.service_config_fk).dump() - - def dump( # pylint: disable=arguments-differ - self, endpoint_ids=True, constraints=True, config_rules=True) -> Dict: - result = { - 'service_id': self.dump_id(), - 'service_type': self.service_type.value, - 'service_status': {'service_status': self.service_status.value}, + def dump(self) -> Dict: + return { + 'service_id' : self.dump_id(), + 'name' : self.service_name, + 'service_type' : self.service_type.value, + 'service_status' : {'service_status': self.service_status.value}, + 'service_endpoint_ids': [ + service_endpoint.endpoint.dump_id() + for service_endpoint in self.service_endpoints + ], + 'service_constraints' : [ + #constraint.dump() + #for constraint in sorted(self.constraints, key=operator.attrgetter('position')) + ], + 'service_config' : {'config_rules': [ + config_rule.dump() + for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) + ]}, } - if endpoint_ids: - result['service_endpoint_ids'] = self.dump_endpoint_ids() - if constraints: - result['service_constraints'] = self.dump_constraints() - if config_rules: - result.setdefault('service_config', {})['config_rules'] = self.dump_config() - return result diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index 95f7a6350..f7053b603 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -13,23 +13,21 @@ # limitations under the License. from typing import Dict -from sqlalchemy import Column, Float, ForeignKey, String +from sqlalchemy import Column, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from ._Base import _Base class TopologyModel(_Base): __tablename__ = 'topology' - context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid'), primary_key=True) - topology_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - topology_name = Column(String(), nullable=False) - created_at = Column(Float) - # Relationships - context = relationship('ContextModel', back_populates='topologies') + topology_uuid = Column(UUID(as_uuid=False), primary_key=True) + context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid')) + topology_name = Column(String, nullable=False) + + context = relationship('ContextModel', back_populates='topologies') topology_devices = relationship('TopologyDeviceModel', back_populates='topology') - topology_links = relationship('TopologyLinkModel', back_populates='topology') - endpoints = relationship('EndPointModel', back_populates='topology') + #topology_links = relationship('TopologyLinkModel', back_populates='topology') def dump_id(self) -> Dict: return { @@ -42,5 +40,5 @@ class TopologyModel(_Base): 'topology_id': self.dump_id(), 'name' : self.topology_name, 'device_ids' : [{'device_uuid': {'uuid': td.device_uuid}} for td in self.topology_devices], - 'link_ids' : [{'link_uuid' : {'uuid': td.link_uuid }} for td in self.topology_links ], + #'link_ids' : [{'link_uuid' : {'uuid': td.link_uuid }} for td in self.topology_links ], } diff --git a/src/context/service/database/models/enums/ServiceStatus.py b/src/context/service/database/models/enums/ServiceStatus.py new file mode 100644 index 000000000..5afd5da8f --- /dev/null +++ b/src/context/service/database/models/enums/ServiceStatus.py @@ -0,0 +1,26 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.context_pb2 import ServiceStatusEnum +from ._GrpcToEnum import grpc_to_enum + +class ORM_ServiceStatusEnum(enum.Enum): + UNDEFINED = ServiceStatusEnum.SERVICESTATUS_UNDEFINED + PLANNED = ServiceStatusEnum.SERVICESTATUS_PLANNED + ACTIVE = ServiceStatusEnum.SERVICESTATUS_ACTIVE + PENDING_REMOVAL = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL + +grpc_to_enum__service_status = functools.partial( + grpc_to_enum, ServiceStatusEnum, ORM_ServiceStatusEnum) diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py new file mode 100644 index 000000000..e36cbc389 --- /dev/null +++ b/src/context/service/database/models/enums/ServiceType.py @@ -0,0 +1,26 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.context_pb2 import ServiceTypeEnum +from ._GrpcToEnum import grpc_to_enum + +class ORM_ServiceTypeEnum(enum.Enum): + UNKNOWN = ServiceTypeEnum.SERVICETYPE_UNKNOWN + L3NM = ServiceTypeEnum.SERVICETYPE_L3NM + L2NM = ServiceTypeEnum.SERVICETYPE_L2NM + TAPI_CONNECTIVITY_SERVICE = ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE + +grpc_to_enum__service_type = functools.partial( + grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum) diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index 3bb0065d3..1e50fe3c1 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.object_factory.ConfigRule import json_config_rule_set from common.tools.object_factory.Connection import json_connection, json_connection_id @@ -27,13 +27,15 @@ from common.tools.object_factory.PolicyRule import json_policy_rule, json_policy # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_NAME = DEFAULT_CONTEXT_NAME +CONTEXT_ID = json_context_id(CONTEXT_NAME) +CONTEXT = json_context(CONTEXT_NAME, name=CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_NAME = DEFAULT_TOPOLOGY_NAME +TOPOLOGY_ID = json_topology_id(TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(TOPOLOGY_NAME, context_id=CONTEXT_ID, name=TOPOLOGY_NAME) # ----- KPI Sample Types ----------------------------------------------------------------------------------------------- @@ -52,8 +54,8 @@ EP3 = '368baf47-0540-4ab4-add8-a19b5167162c' EP100 = '6a923121-36e1-4b5e-8cd6-90aceca9b5cf' -DEVICE_R1_UUID = 'fe83a200-6ded-47b4-b156-3bb3556a10d6' -DEVICE_R1_ID = json_device_id(DEVICE_R1_UUID) +DEVICE_R1_NAME = 'R1' +DEVICE_R1_ID = json_device_id(DEVICE_R1_NAME) DEVICE_R1_EPS = [ json_endpoint(DEVICE_R1_ID, EP2, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), json_endpoint(DEVICE_R1_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), @@ -65,11 +67,11 @@ DEVICE_R1_RULES = [ json_config_rule_set('dev/rsrc3/value', 'value3'), ] DEVICE_R1 = json_device_packetrouter_disabled( - DEVICE_R1_UUID, endpoints=DEVICE_R1_EPS, config_rules=DEVICE_R1_RULES) + DEVICE_R1_NAME, endpoints=DEVICE_R1_EPS, config_rules=DEVICE_R1_RULES) -DEVICE_R2_UUID = '2fd2be23-5b20-414c-b1ea-2f16ae6eb425' -DEVICE_R2_ID = json_device_id(DEVICE_R2_UUID) +DEVICE_R2_NAME = 'R2' +DEVICE_R2_ID = json_device_id(DEVICE_R2_NAME) DEVICE_R2_EPS = [ json_endpoint(DEVICE_R2_ID, EP1, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), json_endpoint(DEVICE_R2_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), @@ -81,11 +83,11 @@ DEVICE_R2_RULES = [ json_config_rule_set('dev/rsrc3/value', 'value6'), ] DEVICE_R2 = json_device_packetrouter_disabled( - DEVICE_R2_UUID, endpoints=DEVICE_R2_EPS, config_rules=DEVICE_R2_RULES) + DEVICE_R2_NAME, endpoints=DEVICE_R2_EPS, config_rules=DEVICE_R2_RULES) -DEVICE_R3_UUID = '3e71a251-2218-42c5-b4b8-de7760c0d9b3' -DEVICE_R3_ID = json_device_id(DEVICE_R3_UUID) +DEVICE_R3_NAME = 'R3' +DEVICE_R3_ID = json_device_id(DEVICE_R3_NAME) DEVICE_R3_EPS = [ json_endpoint(DEVICE_R3_ID, EP2, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), json_endpoint(DEVICE_R3_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), @@ -97,7 +99,7 @@ DEVICE_R3_RULES = [ json_config_rule_set('dev/rsrc3/value', 'value6'), ] DEVICE_R3 = json_device_packetrouter_disabled( - DEVICE_R3_UUID, endpoints=DEVICE_R3_EPS, config_rules=DEVICE_R3_RULES) + DEVICE_R3_NAME, endpoints=DEVICE_R3_EPS, config_rules=DEVICE_R3_RULES) # ----- Link ----------------------------------------------------------------------------------------------------------- diff --git a/src/context/tests/test_unitary.py b/src/context/tests/__test_unitary.py similarity index 64% rename from src/context/tests/test_unitary.py rename to src/context/tests/__test_unitary.py index 6845036bd..e49fd2752 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/__test_unitary.py @@ -12,31 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest -from context.client.ContextClient import ContextClient -from ._test_context import grpc_context -from ._test_topology import grpc_topology -from ._test_device import grpc_device -from ._test_link import grpc_link +#import pytest +#from context.client.ContextClient import ContextClient +#from .test_unitary_context import grpc_context +#from ._test_topology import grpc_topology +#from ._test_device import grpc_device +#from ._test_link import grpc_link #from ._test_service import grpc_service #from ._test_slice import grpc_slice #from ._test_connection import grpc_connection #from ._test_policy import grpc_policy -def test_grpc_context(context_client_grpc : ContextClient) -> None: - grpc_context(context_client_grpc) +#def test_grpc_context(context_client_grpc : ContextClient) -> None: +# grpc_context(context_client_grpc) -@pytest.mark.depends(on=['test_grpc_context']) -def test_grpc_topology(context_client_grpc : ContextClient) -> None: - grpc_topology(context_client_grpc) +#@pytest.mark.depends(on=['test_grpc_context']) +#def test_grpc_topology(context_client_grpc : ContextClient) -> None: +# grpc_topology(context_client_grpc) -@pytest.mark.depends(on=['test_grpc_topology']) -def test_grpc_device(context_client_grpc : ContextClient) -> None: - grpc_device(context_client_grpc) +#@pytest.mark.depends(on=['test_grpc_topology']) +#def test_grpc_device(context_client_grpc : ContextClient) -> None: +# grpc_device(context_client_grpc) -@pytest.mark.depends(on=['test_grpc_device']) -def test_grpc_link(context_client_grpc : ContextClient) -> None: - grpc_link(context_client_grpc) +#@pytest.mark.depends(on=['test_grpc_device']) +#def test_grpc_link(context_client_grpc : ContextClient) -> None: +# grpc_link(context_client_grpc) #@pytest.mark.depends(on=['test_grpc_link']) #def test_grpc_service(context_client_grpc : ContextClient) -> None: diff --git a/src/context/tests/_test_link.py b/src/context/tests/_test_link.py index d493f23d7..963fd72cf 100644 --- a/src/context/tests/_test_link.py +++ b/src/context/tests/_test_link.py @@ -21,7 +21,7 @@ from .Objects import ( CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_UUID, TOPOLOGY, TOPOLOGY_ID) -def grpc_link(context_client_grpc: ContextClient) -> None: +def grpc_link(context_client_grpc : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- #events_collector = EventsCollector( @@ -78,10 +78,10 @@ def grpc_link(context_client_grpc: ContextClient) -> None: assert response.link_uuid.uuid == LINK_R1_R2_UUID # ----- Check create event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, LinkEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + #event = events_collector.get_event(block=True) + #assert isinstance(event, LinkEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) @@ -108,10 +108,10 @@ def grpc_link(context_client_grpc: ContextClient) -> None: assert response.link_uuid.uuid == LINK_R1_R2_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, LinkEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + #event = events_collector.get_event(block=True) + #assert isinstance(event, LinkEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) @@ -138,11 +138,11 @@ def grpc_link(context_client_grpc: ContextClient) -> None: assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #event = events_collector.get_event(block=True) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID # ----- Check relation was created --------------------------------------------------------------------------------- response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) @@ -155,35 +155,30 @@ def grpc_link(context_client_grpc: ContextClient) -> None: assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID # ----- Remove the object ------------------------------------------------------------------------------------------ - #context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) - #context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - #context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - #context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - #context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=5) - # - # assert isinstance(events[0], LinkEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID - # - # assert isinstance(events[1], DeviceEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID - # - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID - # - # assert isinstance(events[3], TopologyEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # - # assert isinstance(events[4], ContextEvent) - # assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #events = events_collector.get_events(block=True, count=5) + #assert isinstance(events[0], LinkEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + #assert isinstance(events[1], DeviceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID + #assert isinstance(events[2], DeviceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID + #assert isinstance(events[3], TopologyEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert isinstance(events[4], ContextEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- #events_collector.stop() diff --git a/src/context/tests/_test_service.py b/src/context/tests/_test_service.py index 88ece2ba9..8bd6570de 100644 --- a/src/context/tests/_test_service.py +++ b/src/context/tests/_test_service.py @@ -13,28 +13,24 @@ # limitations under the License. import copy, grpc, pytest -from typing import Tuple from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.proto.context_pb2 import ( - Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Service, ServiceEvent, ServiceId, - ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) + Context, ContextId, Device, DeviceId, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyId) from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector +#from context.client.EventsCollector import EventsCollector from .Objects import ( CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, TOPOLOGY, TOPOLOGY_ID) -def grpc_service( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_db_mb[0] - # ----- Clean the database ----------------------------------------------------------------------------------------- - database = Database(Session) - database.clear() +def grpc_service(context_client_grpc : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() + #events_collector = EventsCollector( + # context_client_grpc, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = True, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client_grpc.SetContext(Context(**CONTEXT)) @@ -49,49 +45,39 @@ def grpc_service( response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) assert response.device_uuid.uuid == DEVICE_R2_UUID + # events = events_collector.get_events(block=True, count=4) - # # assert isinstance(events[0], ContextEvent) # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # # assert isinstance(events[1], TopologyEvent) # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # # assert isinstance(events[2], DeviceEvent) # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - # # assert isinstance(events[3], DeviceEvent) # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - LOGGER.info('----------------') # ----- Get when the object does not exist ------------------------------------------------------------------------- with pytest.raises(grpc.RpcError) as e: context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) assert e.value.code() == grpc.StatusCode.NOT_FOUND assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID) - LOGGER.info('----------------') # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) assert len(response.service_ids) == 0 - LOGGER.info('----------------') response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) assert len(response.services) == 0 - LOGGER.info('----------------') - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 80 # ----- Create the object ------------------------------------------------------------------------------------------ with pytest.raises(grpc.RpcError) as e: @@ -108,54 +94,77 @@ def grpc_service( assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # ----- Check create event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=2) + #event = events_collector.get_event(block=True) + #assert isinstance(event, ServiceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.name == '' + assert len(response.topology_ids) == 1 + assert len(response.service_ids) == 1 + assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID + assert len(response.slice_ids) == 0 + + response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) + assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.name == '' + assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM + assert len(response.service_endpoint_ids) == 2 + assert len(response.service_constraints) == 2 + assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert len(response.service_config.config_rules) == 3 - assert isinstance(events[0], ServiceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + assert len(response.service_ids) == 1 + assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID - assert isinstance(events[1], ContextEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 1 + assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.services[0].name == '' + assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM + assert len(response.service_endpoint_ids) == 2 + assert len(response.service_constraints) == 2 + assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert len(response.service_config.config_rules) == 3 # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) + new_service_name = 'svc:r1-r2' + SERVICE_UPDATED = copy.deepcopy(SERVICE_R1_R2) + SERVICE_UPDATED['name'] = new_service_name + response = context_client_grpc.SetService(Service(**SERVICE_UPDATED)) assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.service_uuid.uuid == SERVICE_R1_R2_UUID # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ServiceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 108 + #event = events_collector.get_event(block=True) + #assert isinstance(event, ServiceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - # ----- Get when the object exists --------------------------------------------------------------------------------- + # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.name == new_service_name assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM assert len(response.service_endpoint_ids) == 2 assert len(response.service_constraints) == 2 assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED assert len(response.service_config.config_rules) == 3 - # ----- List when the object exists -------------------------------------------------------------------------------- + # ----- List when the object is modified --------------------------------------------------------------------------- response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) assert len(response.service_ids) == 1 assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID @@ -165,6 +174,7 @@ def grpc_service( assert len(response.services) == 1 assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.services[0].name == new_service_name assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM assert len(response.services[0].service_endpoint_ids) == 2 assert len(response.services[0].service_constraints) == 2 @@ -173,42 +183,45 @@ def grpc_service( # ----- Remove the object ------------------------------------------------------------------------------------------ context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=5) - - assert isinstance(events[0], ServiceEvent) - assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + #event = events_collector.get_event(block=True) + #assert isinstance(event, ServiceEvent) + #assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - assert isinstance(events[1], DeviceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID + response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + assert len(response.service_ids) == 0 - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID + response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 - assert isinstance(events[3], TopologyEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # ----- Clean dependencies used in the test and capture related events --------------------------------------------- + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - assert isinstance(events[4], ContextEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #events = events_collector.get_events(block=True, count=4) + #assert isinstance(events[0], DeviceEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + #assert isinstance(events[1], DeviceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].device_id.device_uuid.uuid == DEVICE_R2_UUID + #assert isinstance(events[2], TopologyEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[2].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert isinstance(events[3], ContextEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 + #events_collector.stop() diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py index cf56ed9af..872c51ccf 100644 --- a/src/context/tests/conftest.py +++ b/src/context/tests/conftest.py @@ -28,10 +28,6 @@ from context.service.ContextService import ContextService from context.service.Database import Database from context.service.Engine import Engine from context.service.database.models._Base import rebuild_database -#from context.service._old_code.Populate import populate -#from context.service.rest_server.RestServer import RestServer -#from context.service.rest_server.Resources import RESOURCES - LOCAL_HOST = '127.0.0.1' GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports @@ -41,29 +37,8 @@ os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT) -#DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST -#DEFAULT_REDIS_SERVICE_PORT = 6379 -#DEFAULT_REDIS_DATABASE_ID = 0 - -#REDIS_CONFIG = { -# 'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST), -# 'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT), -# 'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID', DEFAULT_REDIS_DATABASE_ID ), -#} - -#SCENARIOS = [ -# ('db:cockroach_mb:inmemory', None, {}, None, {}), -# ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) -# ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), -#] - -#@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) @pytest.fixture(scope='session') def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]: # pylint: disable=unused-argument - #name,db_session,mb_backend,mb_settings = request.param - #msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' - #LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - _db_engine = Engine.get_engine() Engine.drop_database(_db_engine) Engine.create_database(_db_engine) @@ -76,7 +51,7 @@ def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]: RAW_METRICS = None @pytest.fixture(scope='session') -def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name +def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name global RAW_METRICS # pylint: disable=global-statement _service = ContextService(context_db_mb[0], context_db_mb[1]) RAW_METRICS = _service.context_servicer._get_metrics() @@ -84,22 +59,8 @@ def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pyli yield _service _service.stop() -#@pytest.fixture(scope='session') -#def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name -# database = context_db_mb[0] -# _rest_server = RestServer() -# for endpoint_name, resource_class, resource_url in RESOURCES: -# _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) -# _rest_server.start() -# time.sleep(1) # bring time for the server to start -# yield _rest_server -# _rest_server.shutdown() -# _rest_server.join() - @pytest.fixture(scope='session') -def context_client_grpc( - context_service_grpc : ContextService # pylint: disable=redefined-outer-name,unused-argument -): +def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name,unused-argument _client = ContextClient() yield _client _client.close() @@ -117,7 +78,7 @@ def pytest_terminal_summary( elif '_HISTOGRAM_' in raw_metric_name: method_name,metric_name = raw_metric_name.split('_HISTOGRAM_') else: - raise Exception('Unsupported metric: {:s}'.format(raw_metric_name)) + raise Exception('Unsupported metric: {:s}'.format(raw_metric_name)) # pragma: no cover metric_data = method_to_metric_fields.setdefault(method_name, dict()).setdefault(metric_name, dict()) for field_name,labels,value,_,_ in raw_metric_data._child_samples(): if len(labels) > 0: field_name = '{:s}:{:s}'.format(field_name, json.dumps(labels, sort_keys=True)) diff --git a/src/context/tests/_test_context.py b/src/context/tests/test_context.py similarity index 55% rename from src/context/tests/_test_context.py rename to src/context/tests/test_context.py index ef67d39d7..915989eb7 100644 --- a/src/context/tests/_test_context.py +++ b/src/context/tests/test_context.py @@ -12,96 +12,66 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest, uuid -from common.Constants import DEFAULT_CONTEXT_UUID +import copy, grpc, pytest from common.proto.context_pb2 import Context, ContextId, Empty -from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Service import json_service_id -from common.tools.object_factory.Slice import json_slice_id -from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient +from context.service.database.methods.uuids.Context import context_get_uuid #from context.client.EventsCollector import EventsCollector -from .Objects import CONTEXT, CONTEXT_ID +from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME -def grpc_context(context_client_grpc : ContextClient) -> None: +def test_context(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, + # context_client, log_events_received=True, # activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, # activate_connection_collector = False) #events_collector.start() # ----- Get when the object does not exist ------------------------------------------------------------------------- + context_id = ContextId(**CONTEXT_ID) + context_uuid = context_get_uuid(context_id, allow_random=False) with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + context_client.GetContext(context_id) assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Context({:s}) not found'.format(DEFAULT_CONTEXT_UUID) + MSG = 'Context({:s}) not found; context_uuid generated was: {:s}' + assert e.value.details() == MSG.format(CONTEXT_NAME, context_uuid) # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListContextIds(Empty()) + response = context_client.ListContextIds(Empty()) assert len(response.context_ids) == 0 - response = context_client_grpc.ListContexts(Empty()) + response = context_client.ListContexts(Empty()) assert len(response.contexts) == 0 # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - wrong_context_uuid = str(uuid.uuid4()) - wrong_context_id = json_context_id(wrong_context_uuid) - with pytest.raises(grpc.RpcError) as e: - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['topology_ids'].append(json_topology_id(str(uuid.uuid4()), context_id=wrong_context_id)) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - with pytest.raises(grpc.RpcError) as e: - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['service_ids'].append(json_service_id(str(uuid.uuid4()), context_id=wrong_context_id)) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - with pytest.raises(grpc.RpcError) as e: - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['slice_ids'].append(json_slice_id(str(uuid.uuid4()), context_id=wrong_context_id)) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.slice_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_context_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg + response = context_client.SetContext(Context(**CONTEXT)) + assert response.context_uuid.uuid == context_uuid # ----- Check create event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True, timeout=10.0) #assert isinstance(event, ContextEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.name == '' + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.name == CONTEXT_NAME assert len(response.topology_ids) == 0 assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListContextIds(Empty()) + response = context_client.ListContextIds(Empty()) assert len(response.context_ids) == 1 - assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.context_ids[0].context_uuid.uuid == context_uuid - response = context_client_grpc.ListContexts(Empty()) + response = context_client.ListContexts(Empty()) assert len(response.contexts) == 1 - assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.contexts[0].name == '' + assert response.contexts[0].context_id.context_uuid.uuid == context_uuid + assert response.contexts[0].name == CONTEXT_NAME assert len(response.contexts[0].topology_ids) == 0 assert len(response.contexts[0].service_ids) == 0 assert len(response.contexts[0].slice_ids) == 0 @@ -110,50 +80,50 @@ def grpc_context(context_client_grpc : ContextClient) -> None: new_context_name = 'new' CONTEXT_WITH_NAME = copy.deepcopy(CONTEXT) CONTEXT_WITH_NAME['name'] = new_context_name - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_NAME)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client.SetContext(Context(**CONTEXT_WITH_NAME)) + assert response.context_uuid.uuid == context_uuid # ----- Check update event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True, timeout=10.0) #assert isinstance(event, ContextEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == context_uuid assert response.name == new_context_name assert len(response.topology_ids) == 0 assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListContextIds(Empty()) + response = context_client.ListContextIds(Empty()) assert len(response.context_ids) == 1 - assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.context_ids[0].context_uuid.uuid == context_uuid - response = context_client_grpc.ListContexts(Empty()) + response = context_client.ListContexts(Empty()) assert len(response.contexts) == 1 - assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + assert response.contexts[0].context_id.context_uuid.uuid == context_uuid assert response.contexts[0].name == new_context_name assert len(response.contexts[0].topology_ids) == 0 assert len(response.contexts[0].service_ids) == 0 assert len(response.contexts[0].slice_ids) == 0 # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + context_client.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True, timeout=10.0) #assert isinstance(event, ContextEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.context_id.context_uuid.uuid == context_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- - response = context_client_grpc.ListContextIds(Empty()) + response = context_client.ListContextIds(Empty()) assert len(response.context_ids) == 0 - response = context_client_grpc.ListContexts(Empty()) + response = context_client.ListContexts(Empty()) assert len(response.contexts) == 0 # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- diff --git a/src/context/tests/_test_device.py b/src/context/tests/test_device.py similarity index 56% rename from src/context/tests/_test_device.py rename to src/context/tests/test_device.py index 20760a961..381b5d4fd 100644 --- a/src/context/tests/_test_device.py +++ b/src/context/tests/test_device.py @@ -13,122 +13,125 @@ # limitations under the License. import copy, grpc, pytest -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.proto.context_pb2 import ( Context, ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty, Topology, TopologyId) from context.client.ContextClient import ContextClient +from context.service.database.methods.uuids.Device import device_get_uuid #from context.client.EventsCollector import EventsCollector -from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, TOPOLOGY, TOPOLOGY_ID +from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_NAME, TOPOLOGY, TOPOLOGY_ID -def grpc_device(context_client_grpc : ContextClient) -> None: +@pytest.mark.depends(on=['context/tests/test_topology.py::test_topology']) +def test_device(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, + # context_client, log_events_received=True, # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = True, # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, # activate_connection_collector = False) #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client.SetContext(Context(**CONTEXT)) + context_uuid = response.context_uuid.uuid - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + response = context_client.SetTopology(Topology(**TOPOLOGY)) + topology_uuid = response.topology_uuid.uuid #events = events_collector.get_events(block=True, count=2) #assert isinstance(events[0], ContextEvent) #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[0].context_id.context_uuid.uuid == context_uuid #assert isinstance(events[1], TopologyEvent) #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- + device_id = DeviceId(**DEVICE_R1_ID) + device_uuid = device_get_uuid(device_id, allow_random=False) with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) + context_client.GetDevice(device_id) assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Device({:s}) not found'.format(DEVICE_R1_UUID) + MSG = 'Device({:s}) not found; device_uuid generated was: {:s}' + assert e.value.details() == MSG.format(DEVICE_R1_NAME, device_uuid) # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListDeviceIds(Empty()) + response = context_client.ListDeviceIds(Empty()) assert len(response.device_ids) == 0 - response = context_client_grpc.ListDevices(Empty()) + response = context_client.ListDevices(Empty()) assert len(response.devices) == 0 # ----- Create the object ------------------------------------------------------------------------------------------ with pytest.raises(grpc.RpcError) as e: WRONG_DEVICE = copy.deepcopy(DEVICE_R1) - WRONG_DEVICE_UUID = '3f03c76d-31fb-47f5-9c1d-bc6b6bfa2d08' + WRONG_DEVICE_UUID = 'ffffffff-ffff-ffff-ffff-ffffffffffff' WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID - context_client_grpc.SetDevice(Device(**WRONG_DEVICE)) + context_client.SetDevice(Device(**WRONG_DEVICE)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ - 'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID) - assert e.value.details() == msg + MSG = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ + 'should be == request.device_id.device_uuid.uuid({})' + assert e.value.details() == MSG.format(WRONG_DEVICE_UUID, device_id.device_uuid.uuid) # pylint: disable=no-member - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID + response = context_client.SetDevice(Device(**DEVICE_R1)) + assert response.device_uuid.uuid == device_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, DeviceEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + #event = events_collector.get_event(block=True) + #assert isinstance(event, DeviceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.device_id.device_uuid.uuid == device_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) - assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.name == '' + response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert response.device_id.device_uuid.uuid == device_uuid + assert response.name == DEVICE_R1_NAME assert response.device_type == 'packet-router' - assert len(response.device_config.config_rules) == 3 + #assert len(response.device_config.config_rules) == 3 assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED assert len(response.device_drivers) == 1 assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers assert len(response.device_endpoints) == 3 # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListDeviceIds(Empty()) + response = context_client.ListDeviceIds(Empty()) assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + assert response.device_ids[0].device_uuid.uuid == device_uuid - response = context_client_grpc.ListDevices(Empty()) + response = context_client.ListDevices(Empty()) assert len(response.devices) == 1 - assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.devices[0].name == '' + assert response.devices[0].device_id.device_uuid.uuid == device_uuid + assert response.devices[0].name == DEVICE_R1_NAME assert response.devices[0].device_type == 'packet-router' - assert len(response.devices[0].device_config.config_rules) == 3 + #assert len(response.devices[0].device_config.config_rules) == 3 assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED assert len(response.devices[0].device_drivers) == 1 assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers assert len(response.devices[0].device_endpoints) == 3 # ----- Update the object ------------------------------------------------------------------------------------------ - new_device_name = 'r1' + new_device_name = 'new' new_device_driver = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED DEVICE_UPDATED = copy.deepcopy(DEVICE_R1) DEVICE_UPDATED['name'] = new_device_name DEVICE_UPDATED['device_operational_status'] = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED DEVICE_UPDATED['device_drivers'].append(new_device_driver) - response = context_client_grpc.SetDevice(Device(**DEVICE_UPDATED)) - assert response.device_uuid.uuid == DEVICE_R1_UUID + response = context_client.SetDevice(Device(**DEVICE_UPDATED)) + assert response.device_uuid.uuid == device_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, DeviceEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID + #event = events_collector.get_event(block=True) + #assert isinstance(event, DeviceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.device_id.device_uuid.uuid == device_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) - assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID + response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID)) + assert response.device_id.device_uuid.uuid == device_uuid assert response.name == new_device_name assert response.device_type == 'packet-router' - assert len(response.device_config.config_rules) == 3 + #assert len(response.device_config.config_rules) == 3 assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED assert len(response.device_drivers) == 2 assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.device_drivers @@ -136,16 +139,16 @@ def grpc_device(context_client_grpc : ContextClient) -> None: assert len(response.device_endpoints) == 3 # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListDeviceIds(Empty()) + response = context_client.ListDeviceIds(Empty()) assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + assert response.device_ids[0].device_uuid.uuid == device_uuid - response = context_client_grpc.ListDevices(Empty()) + response = context_client.ListDevices(Empty()) assert len(response.devices) == 1 - assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + assert response.devices[0].device_id.device_uuid.uuid == device_uuid assert response.devices[0].name == new_device_name assert response.devices[0].device_type == 'packet-router' - assert len(response.devices[0].device_config.config_rules) == 3 + #assert len(response.devices[0].device_config.config_rules) == 3 assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED assert len(response.devices[0].device_drivers) == 2 assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.devices[0].device_drivers @@ -153,47 +156,55 @@ def grpc_device(context_client_grpc : ContextClient) -> None: assert len(response.devices[0].device_endpoints) == 3 # ----- Create object relation ------------------------------------------------------------------------------------- - TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID) - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) + #TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID) + #response = context_client.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE)) + #assert response.context_id.context_uuid.uuid == context_uuid + #assert response.topology_uuid.uuid == topology_uuid # ----- Check update event ----------------------------------------------------------------------------------------- # event = events_collector.get_event(block=True) # assert isinstance(event, TopologyEvent) # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # assert response.context_id.context_uuid.uuid == context_uuid + # assert response.topology_uuid.uuid == topology_uuid # ----- Check relation was created --------------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topology_id.topology_uuid.uuid == topology_uuid assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID + assert response.device_ids[0].device_uuid.uuid == device_uuid assert len(response.link_ids) == 0 # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + #context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=3) + #event = events_collector.get_event(block=True) + #assert isinstance(event, DeviceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.device_id.device_uuid.uuid == device_uuid - # assert isinstance(events[0], DeviceEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + # ----- List after deleting the object ----------------------------------------------------------------------------- + #response = context_client.ListDeviceIds(Empty()) + #assert len(response.device_ids) == 0 - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #response = context_client.ListDevices(Empty()) + #assert len(response.devices) == 0 - # assert isinstance(events[2], ContextEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # ----- Clean dependencies used in the test and capture related events --------------------------------------------- + #context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + #context_client.RemoveContext(ContextId(**CONTEXT_ID)) + + #events = events_collector.get_events(block=True, count=2) + #assert isinstance(events[0], TopologyEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[0].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[1], ContextEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- #events_collector.stop() diff --git a/src/context/tests/_test_topology.py b/src/context/tests/test_topology.py similarity index 57% rename from src/context/tests/_test_topology.py rename to src/context/tests/test_topology.py index 9774d972f..142887d09 100644 --- a/src/context/tests/_test_topology.py +++ b/src/context/tests/test_topology.py @@ -13,154 +13,162 @@ # limitations under the License. import copy, grpc, pytest -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.proto.context_pb2 import Context, ContextId, Topology, TopologyId from context.client.ContextClient import ContextClient +from context.service.database.methods.uuids.Topology import topology_get_uuid #from context.client.EventsCollector import EventsCollector -from .Objects import CONTEXT, CONTEXT_ID, TOPOLOGY, TOPOLOGY_ID +from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, TOPOLOGY_NAME -def grpc_topology(context_client_grpc : ContextClient) -> None: +@pytest.mark.depends(on=['context/tests/test_context.py::test_context']) +def test_topology(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, + # context_client, log_events_received=True, # activate_context_collector = False, activate_topology_collector = True, activate_device_collector = False, # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, # activate_connection_collector = False) #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client.SetContext(Context(**CONTEXT)) + context_uuid = response.context_uuid.uuid + # event = events_collector.get_event(block=True) # assert isinstance(event, ContextEvent) # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- + topology_id = TopologyId(**TOPOLOGY_ID) + context_uuid,topology_uuid = topology_get_uuid(topology_id, allow_random=False) with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) + context_client.GetTopology(topology_id) assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) + MSG = 'Topology({:s}/{:s}) not found; context_uuid generated was: {:s}; topology_uuid generated was: {:s}' + assert e.value.details() == MSG.format(CONTEXT_NAME, TOPOLOGY_NAME, context_uuid, topology_uuid) # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + response = context_client.GetContext(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 0 + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 0 + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) assert len(response.topologies) == 0 # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - #CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT) - #CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID) - #response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY)) - #assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.topology_uuid.uuid == topology_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #events = events_collector.get_events(block=True, count=2) - #assert isinstance(events[0], TopologyEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - #assert isinstance(events[1], ContextEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #event = events_collector.get_event(block=True) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.topology_id.context_id.context_uuid.uuid == context_uuid + #assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.name == '' + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.name == CONTEXT_NAME assert len(response.topology_ids) == 1 - assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topology_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.topology_ids[0].topology_uuid.uuid == topology_uuid assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert response.name == '' + response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topology_id.topology_uuid.uuid == topology_uuid + assert response.name == TOPOLOGY_NAME assert len(response.device_ids) == 0 assert len(response.link_ids) == 0 # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 1 - assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topology_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.topology_ids[0].topology_uuid.uuid == topology_uuid - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) assert len(response.topologies) == 1 - assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert response.topologies[0].name == '' + assert response.topologies[0].topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topologies[0].topology_id.topology_uuid.uuid == topology_uuid + assert response.topologies[0].name == TOPOLOGY_NAME assert len(response.topologies[0].device_ids) == 0 assert len(response.topologies[0].link_ids) == 0 # ----- Update the object ------------------------------------------------------------------------------------------ new_topology_name = 'new' - TOPOLOGY_WITH_NAME = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_NAME['name'] = new_topology_name - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_NAME)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + TOPOLOGY_UPDATED = copy.deepcopy(TOPOLOGY) + TOPOLOGY_UPDATED['name'] = new_topology_name + response = context_client.SetTopology(Topology(**TOPOLOGY_UPDATED)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.topology_uuid.uuid == topology_uuid # ----- Check update event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, TopologyEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert event.topology_id.context_id.context_uuid.uuid == context_uuid + #assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topology_id.topology_uuid.uuid == topology_uuid assert response.name == new_topology_name assert len(response.device_ids) == 0 assert len(response.link_ids) == 0 # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 1 - assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topology_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.topology_ids[0].topology_uuid.uuid == topology_uuid - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) assert len(response.topologies) == 1 - assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + assert response.topologies[0].topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topologies[0].topology_id.topology_uuid.uuid == topology_uuid assert response.topologies[0].name == new_topology_name assert len(response.topologies[0].device_ids) == 0 assert len(response.topologies[0].link_ids) == 0 # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, TopologyEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert event.topology_id.context_id.context_uuid.uuid == context_uuid + #assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 0 + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 0 - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) assert len(response.topologies) == 0 # ----- Clean dependencies used in the test and capture related events --------------------------------------------- - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + context_client.RemoveContext(ContextId(**CONTEXT_ID)) + #event = events_collector.get_event(block=True) #assert isinstance(event, ContextEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert event.context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- #events_collector.stop() diff --git a/test-context.sh b/test-context.sh new file mode 100755 index 000000000..7ad303ca9 --- /dev/null +++ b/test-context.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +COVERAGEFILE=$PROJECTDIR/coverage/.coverage + +# Destroy old coverage file and configure the correct folder on the .coveragerc file +rm -f $COVERAGEFILE +cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/tfs-ctrl+$PROJECTDIR+g > $RCFILE + +#export CRDB_URI="cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_test?sslmode=require" +export CRDB_URI="cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs_test?sslmode=require" +export PYTHONPATH=/home/tfs/tfs-ctrl/src + +# Run unitary tests and analyze coverage of code at same time +# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ + context/tests/test_hasher.py \ + context/tests/test_context.py \ + context/tests/test_topology.py \ + context/tests/test_device.py + +echo +echo "Coverage report:" +echo "----------------" +#coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered | grep --color -E -i "^context/.*$|$" +coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered --include="context/*" -- GitLab From c48a557701e15bd48f60240e5cd61bba99167486 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 5 Jan 2023 15:11:36 +0000 Subject: [PATCH 024/158] Context component: - relocated database methods - corrected models to use single-column primary key - corrected test cases --- .../service/ContextServiceServicerImpl.py | 55 ++--- .../service/database/{methods => }/Context.py | 2 +- .../service/database/{methods => }/Device.py | 53 ++-- .../service/database/{methods => }/Link.py | 89 ++++--- .../service/database/{methods => }/Service.py | 2 +- .../database/{methods => }/Topology.py | 4 +- .../database/methods/uuids/__init__.py | 13 - .../database/models/ConfigRuleModel.py | 40 +-- .../service/database/models/DeviceModel.py | 11 +- .../service/database/models/EndPointModel.py | 2 +- .../service/database/models/LinkModel.py | 5 +- .../service/database/models/RelationModels.py | 66 ++--- .../service/database/models/TopologyModel.py | 8 +- .../database/{methods => }/uuids/Context.py | 0 .../database/{methods => }/uuids/Device.py | 0 .../database/{methods => }/uuids/EndPoint.py | 0 .../database/{methods => }/uuids/Link.py | 0 .../database/{methods => }/uuids/Topology.py | 0 .../database/{methods => }/uuids/_Builder.py | 0 .../database/{methods => uuids}/__init__.py | 0 src/context/tests/Objects.py | 232 ++++++------------ src/context/tests/conftest.py | 2 +- src/context/tests/test_context.py | 2 +- src/context/tests/test_device.py | 48 ++-- .../tests/{_test_link.py => test_link.py} | 170 +++++++------ src/context/tests/test_topology.py | 2 +- test-context.sh | 3 +- 27 files changed, 353 insertions(+), 456 deletions(-) rename src/context/service/database/{methods => }/Context.py (98%) rename src/context/service/database/{methods => }/Device.py (88%) rename src/context/service/database/{methods => }/Link.py (58%) rename src/context/service/database/{methods => }/Service.py (99%) rename src/context/service/database/{methods => }/Topology.py (97%) delete mode 100644 src/context/service/database/methods/uuids/__init__.py rename src/context/service/database/{methods => }/uuids/Context.py (100%) rename src/context/service/database/{methods => }/uuids/Device.py (100%) rename src/context/service/database/{methods => }/uuids/EndPoint.py (100%) rename src/context/service/database/{methods => }/uuids/Link.py (100%) rename src/context/service/database/{methods => }/uuids/Topology.py (100%) rename src/context/service/database/{methods => }/uuids/_Builder.py (100%) rename src/context/service/database/{methods => uuids}/__init__.py (100%) rename src/context/tests/{_test_link.py => test_link.py} (51%) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 44409bd0c..6914e05a0 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -35,14 +35,11 @@ from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method #from common.rpc_method_wrapper.ServiceExceptions import ( # InvalidArgumentException, NotFoundException, OperationFailedException) -from .database.methods.Context import ( - context_delete, context_get, context_list_ids, context_list_objs, context_set) -from .database.methods.Device import ( - device_delete, device_get, device_list_ids, device_list_objs, device_set) -#from .database.methods.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set -#from .database.methods.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set -from .database.methods.Topology import ( - topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set) +from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set +from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set +from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set +#from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set +from .database.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set #from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string #from context.service.Database import Database #from context.service.database.ConfigModel import ( @@ -200,31 +197,31 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Link ------------------------------------------------------------------------------------------------------- -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: -# return link_list_ids(self.db_engine) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: + return link_list_ids(self.db_engine) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: -# return link_list_objs(self.db_engine) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: + return link_list_objs(self.db_engine) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: -# return link_get(self.db_engine, request) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: + return link_get(self.db_engine, request) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: -# link_id,updated = link_set(self.db_engine, request) -# #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) -# return link_id + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: + link_id,updated = link_set(self.db_engine, request) + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) + return link_id -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: -# deleted = link_delete(self.db_engine, request) -# #if deleted: -# # notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id}) -# return Empty() + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: + deleted = link_delete(self.db_engine, request) + #if deleted: + # notify_event(self.messagebroker, TOPIC_LINK, EventTypeEnum.EVENTTYPE_REMOVE, {'link_id': request}) + return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: diff --git a/src/context/service/database/methods/Context.py b/src/context/service/database/Context.py similarity index 98% rename from src/context/service/database/methods/Context.py rename to src/context/service/database/Context.py index fc53426e3..85a06d65e 100644 --- a/src/context/service/database/methods/Context.py +++ b/src/context/service/database/Context.py @@ -21,7 +21,7 @@ from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import Context, ContextId, ContextIdList, ContextList from common.rpc_method_wrapper.ServiceExceptions import NotFoundException from common.tools.object_factory.Context import json_context_id -from context.service.database.models.ContextModel import ContextModel +from .models.ContextModel import ContextModel from .uuids.Context import context_get_uuid LOGGER = logging.getLogger(__name__) diff --git a/src/context/service/database/methods/Device.py b/src/context/service/database/Device.py similarity index 88% rename from src/context/service/database/methods/Device.py rename to src/context/service/database/Device.py index 39ae98de0..a0e0a53e5 100644 --- a/src/context/service/database/methods/Device.py +++ b/src/context/service/database/Device.py @@ -21,15 +21,16 @@ from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import Device, DeviceId, DeviceIdList, DeviceList from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Device import json_device_id -#from common.tools.grpc.Tools import grpc_message_to_json_string -#from context.service.database.models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel -from context.service.database.models.DeviceModel import DeviceModel -from context.service.database.models.EndPointModel import EndPointModel -from context.service.database.models.RelationModels import TopologyDeviceModel -#from context.service.database.models.enums.ConfigAction import grpc_to_enum__config_action -from context.service.database.models.enums.DeviceDriver import grpc_to_enum__device_driver -from context.service.database.models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status -from context.service.database.models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type +from common.tools.grpc.Tools import grpc_message_to_json_string +from .models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel +from .models.DeviceModel import DeviceModel +from .models.EndPointModel import EndPointModel +from .models.RelationModels import TopologyDeviceModel +from .models.enums.ConfigAction import grpc_to_enum__config_action +from .models.enums.DeviceDriver import grpc_to_enum__device_driver +from .models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status +from .models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type +from .uuids._Builder import get_uuid_random from .uuids.Device import device_get_uuid from .uuids.EndPoint import endpoint_get_uuid @@ -64,7 +65,7 @@ def device_get(db_engine : Engine, request : DeviceId) -> Device: def device_set(db_engine : Engine, request : Device) -> bool: raw_device_uuid = request.device_id.device_uuid.uuid raw_device_name = request.name - device_name = request.device_id.device_uuid.uuid if len(raw_device_name) == 0 else raw_device_name + device_name = raw_device_uuid if len(raw_device_name) == 0 else raw_device_name device_uuid = device_get_uuid(request.device_id, device_name=device_name, allow_random=True) device_type = request.device_type @@ -83,9 +84,11 @@ def device_set(db_engine : Engine, request : Device) -> bool: ['should be == request.device_id.device_uuid.uuid({:s})'.format(raw_device_uuid)] ) + raw_endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid raw_endpoint_name = endpoint.name endpoint_topology_uuid, endpoint_device_uuid, endpoint_uuid = endpoint_get_uuid( endpoint.endpoint_id, endpoint_name=raw_endpoint_name, allow_random=True) + endpoint_name = raw_endpoint_uuid if len(raw_endpoint_name) == 0 else raw_endpoint_name kpi_sample_types = [grpc_to_enum__kpi_sample_type(kst) for kst in endpoint.kpi_sample_types] @@ -93,7 +96,7 @@ def device_set(db_engine : Engine, request : Device) -> bool: 'endpoint_uuid' : endpoint_uuid, 'device_uuid' : endpoint_device_uuid, 'topology_uuid' : endpoint_topology_uuid, - 'name' : raw_endpoint_name, + 'name' : endpoint_name, 'endpoint_type' : endpoint.endpoint_type, 'kpi_sample_types': kpi_sample_types, }) @@ -101,20 +104,22 @@ def device_set(db_engine : Engine, request : Device) -> bool: if endpoint_topology_uuid not in topology_uuids: related_topologies.append({ 'topology_uuid': endpoint_topology_uuid, - 'device_uuid' : endpoint_device_uuid, + 'device_uuid' : device_uuid, }) topology_uuids.add(endpoint_topology_uuid) - #config_rules : List[Dict] = list() - #for position,config_rule in enumerate(request.device_config.config_rules): - # str_kind = config_rule.WhichOneof('config_rule') - # config_rules.append({ - # 'device_uuid': device_uuid, - # 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member - # 'action' : grpc_to_enum__config_action(config_rule.action), - # 'position' : position, - # 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), - # }) + config_rules : List[Dict] = list() + for position,config_rule in enumerate(request.device_config.config_rules): + configrule_uuid = get_uuid_random() + str_kind = config_rule.WhichOneof('config_rule') + config_rules.append({ + 'configrule_uuid': configrule_uuid, + 'device_uuid' : device_uuid, + 'position' : position, + 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member + 'action' : grpc_to_enum__config_action(config_rule.action), + 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), + }) device_data = [{ 'device_uuid' : device_uuid, @@ -152,8 +157,8 @@ def device_set(db_engine : Engine, request : Device) -> bool: index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] )) - #session.execute(delete(ConfigRuleModel).where(ConfigRuleModel.device_uuid == device_uuid)) - #session.execute(insert(ConfigRuleModel).values(config_rules)) + session.execute(delete(ConfigRuleModel).where(ConfigRuleModel.device_uuid == device_uuid)) + session.execute(insert(ConfigRuleModel).values(config_rules)) run_transaction(sessionmaker(bind=db_engine), callback) updated = False # TODO: improve and check if created/updated diff --git a/src/context/service/database/methods/Link.py b/src/context/service/database/Link.py similarity index 58% rename from src/context/service/database/methods/Link.py rename to src/context/service/database/Link.py index b98578c22..93f90b3ea 100644 --- a/src/context/service/database/methods/Link.py +++ b/src/context/service/database/Link.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker @@ -20,8 +19,11 @@ from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import Link, LinkId, LinkIdList, LinkList from common.rpc_method_wrapper.ServiceExceptions import NotFoundException -from context.service.database.models.LinkModel import LinkModel -from context.service.database.models.RelationModels import LinkEndPointModel, TopologyLinkModel +from common.tools.object_factory.Link import json_link_id +from .models.LinkModel import LinkModel +from .models.RelationModels import LinkEndPointModel, TopologyLinkModel +from .uuids.EndPoint import endpoint_get_uuid +from .uuids.Link import link_get_uuid def link_list_ids(db_engine : Engine) -> LinkIdList: def callback(session : Session) -> List[Dict]: @@ -38,81 +40,76 @@ def link_list_objs(db_engine : Engine) -> LinkList: return LinkList(links=run_transaction(sessionmaker(bind=db_engine), callback)) def link_get(db_engine : Engine, request : LinkId) -> Link: - link_uuid = request.link_uuid.uuid + link_uuid = link_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[LinkModel] = session.query(LinkModel)\ .filter_by(link_uuid=link_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) - if obj is None: raise NotFoundException('Link', link_uuid) + if obj is None: + raw_link_uuid = request.link_uuid.uuid + raise NotFoundException('Link', raw_link_uuid, extra_details=[ + 'link_uuid generated was: {:s}'.format(link_uuid) + ]) return Link(**obj) def link_set(db_engine : Engine, request : Link) -> bool: - link_uuid = request.link_id.link_uuid.uuid - link_name = request.name + raw_link_uuid = request.link_id.link_uuid.uuid + raw_link_name = request.name + link_name = raw_link_uuid if len(raw_link_name) == 0 else raw_link_name + link_uuid = link_get_uuid(request.link_id, link_name=link_name, allow_random=True) - topology_keys : Set[Tuple[str, str]] = set() + topology_uuids : Set[str] = set() related_topologies : List[Dict] = list() link_endpoints_data : List[Dict] = list() for endpoint_id in request.link_endpoint_ids: - context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_topology_uuid, _, endpoint_uuid = endpoint_get_uuid( + endpoint_id, allow_random=False) link_endpoints_data.append({ 'link_uuid' : link_uuid, - 'context_uuid' : context_uuid, - 'topology_uuid': topology_uuid, - 'device_uuid' : device_uuid, 'endpoint_uuid': endpoint_uuid, }) - if len(context_uuid) > 0 and len(topology_uuid) > 0: - topology_key = (context_uuid, topology_uuid) - if topology_key not in topology_keys: - related_topologies.append({ - 'context_uuid': context_uuid, - 'topology_uuid': topology_uuid, - 'link_uuid': link_uuid, - }) - topology_keys.add(topology_key) + if endpoint_topology_uuid not in topology_uuids: + related_topologies.append({ + 'topology_uuid': endpoint_topology_uuid, + 'link_uuid': link_uuid, + }) + topology_uuids.add(endpoint_topology_uuid) + + link_data = [{ + 'link_uuid': link_uuid, + 'link_name': link_name, + }] def callback(session : Session) -> None: - obj : Optional[LinkModel] = session.query(LinkModel).with_for_update()\ - .filter_by(link_uuid=link_uuid).one_or_none() - is_update = obj is not None - if is_update: - obj.link_name = link_name - session.merge(obj) - else: - session.add(LinkModel(link_uuid=link_uuid, link_name=link_name, created_at=time.time())) - obj : Optional[LinkModel] = session.query(LinkModel)\ - .filter_by(link_uuid=link_uuid).one_or_none() + stmt = insert(LinkModel).values(link_data) + stmt = stmt.on_conflict_do_update( + index_elements=[LinkModel.link_uuid], + set_=dict(link_name = stmt.excluded.link_name) + ) + session.execute(stmt) stmt = insert(LinkEndPointModel).values(link_endpoints_data) stmt = stmt.on_conflict_do_nothing( - index_elements=[ - LinkEndPointModel.link_uuid, LinkEndPointModel.context_uuid, LinkEndPointModel.topology_uuid, - LinkEndPointModel.device_uuid, LinkEndPointModel.endpoint_uuid - ], + index_elements=[LinkEndPointModel.link_uuid, LinkEndPointModel.endpoint_uuid] ) session.execute(stmt) session.execute(insert(TopologyLinkModel).values(related_topologies).on_conflict_do_nothing( - index_elements=[ - TopologyLinkModel.context_uuid, TopologyLinkModel.topology_uuid, - TopologyLinkModel.link_uuid - ] + index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid] )) + run_transaction(sessionmaker(bind=db_engine), callback) - return False # TODO: improve and check if created/updated + updated = False # TODO: improve and check if created/updated + return LinkId(**json_link_id(link_uuid)),updated def link_delete(db_engine : Engine, request : LinkId) -> bool: - link_uuid = request.link_uuid.uuid + link_uuid = link_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: - session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() - session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() + #session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() + #session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() num_deleted = session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() #db_link = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() #session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() diff --git a/src/context/service/database/methods/Service.py b/src/context/service/database/Service.py similarity index 99% rename from src/context/service/database/methods/Service.py rename to src/context/service/database/Service.py index 9f5e519df..3b6b4cc26 100644 --- a/src/context/service/database/methods/Service.py +++ b/src/context/service/database/Service.py @@ -20,7 +20,7 @@ from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceIdList, ServiceList from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException -from context.service.database.models.ServiceModel import ServiceModel +from .models.ServiceModel import ServiceModel def service_list_ids(db_engine : Engine, request : ContextId) -> ServiceIdList: context_uuid = request.context_uuid.uuid diff --git a/src/context/service/database/methods/Topology.py b/src/context/service/database/Topology.py similarity index 97% rename from src/context/service/database/methods/Topology.py rename to src/context/service/database/Topology.py index 1abbc5562..25fa02f4b 100644 --- a/src/context/service/database/methods/Topology.py +++ b/src/context/service/database/Topology.py @@ -21,8 +21,8 @@ from common.proto.context_pb2 import ContextId, Topology, TopologyId, TopologyId from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException, NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Topology import json_topology_id -#from context.service.database.models.RelationModels import TopologyDeviceModel, TopologyLinkModel -from context.service.database.models.TopologyModel import TopologyModel +#from .models.RelationModels import TopologyDeviceModel, TopologyLinkModel +from .models.TopologyModel import TopologyModel from .uuids.Context import context_get_uuid from .uuids.Topology import topology_get_uuid diff --git a/src/context/service/database/methods/uuids/__init__.py b/src/context/service/database/methods/uuids/__init__.py deleted file mode 100644 index 9953c8205..000000000 --- a/src/context/service/database/methods/uuids/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index a229f475d..9d56344e8 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -13,9 +13,8 @@ # limitations under the License. import enum, json -from sqlalchemy import Column, INTEGER, CheckConstraint, Enum, ForeignKeyConstraint, String, UniqueConstraint, text +from sqlalchemy import CheckConstraint, Column, Enum, ForeignKey, Integer, String from sqlalchemy.dialects.postgresql import UUID -from sqlalchemy.orm import relationship from typing import Dict from .enums.ConfigAction import ORM_ConfigActionEnum from ._Base import _Base @@ -26,40 +25,19 @@ class ConfigRuleKindEnum(enum.Enum): ACL = 'acl' class ConfigRuleModel(_Base): - __tablename__ = 'config_rule' + __tablename__ = 'configrule' - config_rule_uuid = Column(UUID(as_uuid=False), primary_key=True, server_default=text('uuid_generate_v4()')) - device_uuid = Column(UUID(as_uuid=False)) # for device config rules - context_uuid = Column(UUID(as_uuid=False)) # for service/slice config rules - service_uuid = Column(UUID(as_uuid=False)) # for service config rules - #slice_uuid = Column(UUID(as_uuid=False)) # for slice config rules - kind = Column(Enum(ConfigRuleKindEnum)) - action = Column(Enum(ORM_ConfigActionEnum)) - position = Column(INTEGER, nullable=False) - data = Column(String, nullable=False) + configrule_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE')) + position = Column(Integer, nullable=False) + kind = Column(Enum(ConfigRuleKindEnum)) + action = Column(Enum(ORM_ConfigActionEnum)) + data = Column(String, nullable=False) __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), - UniqueConstraint('device_uuid', 'position', name='unique_per_device'), - UniqueConstraint('context_uuid', 'service_uuid', 'position', name='unique_per_service'), - #UniqueConstraint('context_uuid', 'slice_uuid', 'position', name='unique_per_slice'), - ForeignKeyConstraint( - ['device_uuid'], - ['device.device_uuid'], - ondelete='CASCADE'), - ForeignKeyConstraint( - ['context_uuid', 'service_uuid'], - ['service.context_uuid', 'service.service_uuid'], - ondelete='CASCADE'), - #ForeignKeyConstraint( - # ['context_uuid', 'slice_uuid'], - # ['slice.context_uuid', 'slice.slice_uuid'], - # ondelete='CASCADE'), + #UniqueConstraint('device_uuid', 'position', name='unique_per_device'), ) - device = relationship('DeviceModel', back_populates='config_rules') - service = relationship('ServiceModel', back_populates='config_rules') - #slice = relationship('SliceModel', back_populates='config_rules') - def dump(self) -> Dict: return {self.kind.value: json.loads(self.data)} diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py index 33e780411..50db8e7bb 100644 --- a/src/context/service/database/models/DeviceModel.py +++ b/src/context/service/database/models/DeviceModel.py @@ -23,15 +23,16 @@ from ._Base import _Base class DeviceModel(_Base): __tablename__ = 'device' + device_uuid = Column(UUID(as_uuid=False), primary_key=True) device_name = Column(String, nullable=False) device_type = Column(String, nullable=False) device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum)) device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) - topology_devices = relationship('TopologyDeviceModel', back_populates='device') - #config_rules = relationship('ConfigRuleModel', passive_deletes=True, back_populates='device', lazy='joined') - endpoints = relationship('EndPointModel', passive_deletes=True, back_populates='device', lazy='joined') + #topology_devices = relationship('TopologyDeviceModel', back_populates='device') + config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device' + endpoints = relationship('EndPointModel', passive_deletes=True) # lazy='joined', back_populates='device' def dump_id(self) -> Dict: return {'device_uuid': {'uuid': self.device_uuid}} @@ -44,8 +45,8 @@ class DeviceModel(_Base): 'device_operational_status': self.device_operational_status.value, 'device_drivers' : [driver.value for driver in self.device_drivers], 'device_config' : {'config_rules': [ - #config_rule.dump() - #for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) + config_rule.dump() + for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) ]}, 'device_endpoints' : [ endpoint.dump() diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py index 804b68847..f9d5f7658 100644 --- a/src/context/service/database/models/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -23,7 +23,7 @@ class EndPointModel(_Base): __tablename__ = 'endpoint' endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), ForeignKey('device.device_uuid', ondelete='CASCADE')) + device_uuid = Column(UUID(as_uuid=False), ForeignKey('device.device_uuid', ondelete='CASCADE' )) topology_uuid = Column(UUID(as_uuid=False), ForeignKey('topology.topology_uuid', ondelete='RESTRICT')) name = Column(String) endpoint_type = Column(String) diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index eec871e77..053dc0122 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Dict -from sqlalchemy import Column, Float, String +from sqlalchemy import Column, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from ._Base import _Base @@ -23,9 +23,8 @@ class LinkModel(_Base): link_uuid = Column(UUID(as_uuid=False), primary_key=True) link_name = Column(String, nullable=False) - created_at = Column(Float) - topology_links = relationship('TopologyLinkModel', back_populates='link') + #topology_links = relationship('TopologyLinkModel', back_populates='link') link_endpoints = relationship('LinkEndPointModel', back_populates='link') #, lazy='joined') def dump_id(self) -> Dict: diff --git a/src/context/service/database/models/RelationModels.py b/src/context/service/database/models/RelationModels.py index 38d93bee7..89e8e05e0 100644 --- a/src/context/service/database/models/RelationModels.py +++ b/src/context/service/database/models/RelationModels.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from sqlalchemy import Column, ForeignKey, ForeignKeyConstraint -from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy import Column, ForeignKey #, ForeignKeyConstraint +#from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from ._Base import _Base @@ -22,28 +22,14 @@ from ._Base import _Base # connection_fk = ForeignKeyField(ConnectionModel) # sub_service_fk = ForeignKeyField(ServiceModel) -#class LinkEndPointModel(_Base): -# __tablename__ = 'link_endpoint' -# -# link_uuid = Column(UUID(as_uuid=False), primary_key=True) -# context_uuid = Column(UUID(as_uuid=False), primary_key=True) -# topology_uuid = Column(UUID(as_uuid=False), primary_key=True) -# device_uuid = Column(UUID(as_uuid=False), primary_key=True) -# endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) -# -# link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') -# endpoint = relationship('EndPointModel', back_populates='link_endpoints', lazy='joined') -# -# __table_args__ = ( -# ForeignKeyConstraint( -# ['link_uuid'], -# ['link.link_uuid'], -# ondelete='CASCADE'), -# ForeignKeyConstraint( -# ['context_uuid', 'topology_uuid', 'device_uuid', 'endpoint_uuid'], -# ['endpoint.context_uuid', 'endpoint.topology_uuid', 'endpoint.device_uuid', 'endpoint.endpoint_uuid'], -# ondelete='CASCADE'), -# ) +class LinkEndPointModel(_Base): + __tablename__ = 'link_endpoint' + + link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + + link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints' #class ServiceEndPointModel(_Base): # __tablename__ = 'service_endpoint' @@ -94,26 +80,14 @@ class TopologyDeviceModel(_Base): topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), primary_key=True) - topology = relationship('TopologyModel', back_populates='topology_devices', lazy='joined') - device = relationship('DeviceModel', back_populates='topology_devices', lazy='joined') + #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices' + device = relationship('DeviceModel', lazy='joined') # back_populates='topology_devices' -#class TopologyLinkModel(_Base): -# __tablename__ = 'topology_link' -# -# context_uuid = Column(UUID(as_uuid=False), primary_key=True) -# topology_uuid = Column(UUID(as_uuid=False), primary_key=True) -# link_uuid = Column(UUID(as_uuid=False), primary_key=True) -# -# topology = relationship('TopologyModel', back_populates='topology_links', lazy='joined') -# link = relationship('LinkModel', back_populates='topology_links', lazy='joined') -# -# __table_args__ = ( -# ForeignKeyConstraint( -# ['context_uuid', 'topology_uuid'], -# ['topology.context_uuid', 'topology.topology_uuid'], -# ondelete='CASCADE'), -# ForeignKeyConstraint( -# ['link_uuid'], -# ['link.link_uuid'], -# ondelete='CASCADE'), -# ) +class TopologyLinkModel(_Base): + __tablename__ = 'topology_link' + + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) + link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True) + + #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links' + link = relationship('LinkModel', lazy='joined') # back_populates='topology_links' diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index f7053b603..e0119bead 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -25,9 +25,9 @@ class TopologyModel(_Base): context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid')) topology_name = Column(String, nullable=False) - context = relationship('ContextModel', back_populates='topologies') - topology_devices = relationship('TopologyDeviceModel', back_populates='topology') - #topology_links = relationship('TopologyLinkModel', back_populates='topology') + context = relationship('ContextModel', back_populates='topologies') + topology_devices = relationship('TopologyDeviceModel') # back_populates='topology' + topology_links = relationship('TopologyLinkModel' ) # back_populates='topology' def dump_id(self) -> Dict: return { @@ -40,5 +40,5 @@ class TopologyModel(_Base): 'topology_id': self.dump_id(), 'name' : self.topology_name, 'device_ids' : [{'device_uuid': {'uuid': td.device_uuid}} for td in self.topology_devices], - #'link_ids' : [{'link_uuid' : {'uuid': td.link_uuid }} for td in self.topology_links ], + 'link_ids' : [{'link_uuid' : {'uuid': tl.link_uuid }} for tl in self.topology_links ], } diff --git a/src/context/service/database/methods/uuids/Context.py b/src/context/service/database/uuids/Context.py similarity index 100% rename from src/context/service/database/methods/uuids/Context.py rename to src/context/service/database/uuids/Context.py diff --git a/src/context/service/database/methods/uuids/Device.py b/src/context/service/database/uuids/Device.py similarity index 100% rename from src/context/service/database/methods/uuids/Device.py rename to src/context/service/database/uuids/Device.py diff --git a/src/context/service/database/methods/uuids/EndPoint.py b/src/context/service/database/uuids/EndPoint.py similarity index 100% rename from src/context/service/database/methods/uuids/EndPoint.py rename to src/context/service/database/uuids/EndPoint.py diff --git a/src/context/service/database/methods/uuids/Link.py b/src/context/service/database/uuids/Link.py similarity index 100% rename from src/context/service/database/methods/uuids/Link.py rename to src/context/service/database/uuids/Link.py diff --git a/src/context/service/database/methods/uuids/Topology.py b/src/context/service/database/uuids/Topology.py similarity index 100% rename from src/context/service/database/methods/uuids/Topology.py rename to src/context/service/database/uuids/Topology.py diff --git a/src/context/service/database/methods/uuids/_Builder.py b/src/context/service/database/uuids/_Builder.py similarity index 100% rename from src/context/service/database/methods/uuids/_Builder.py rename to src/context/service/database/uuids/_Builder.py diff --git a/src/context/service/database/methods/__init__.py b/src/context/service/database/uuids/__init__.py similarity index 100% rename from src/context/service/database/methods/__init__.py rename to src/context/service/database/uuids/__init__.py diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index 1e50fe3c1..c350d4f20 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, List, Tuple from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.object_factory.ConfigRule import json_config_rule_set @@ -48,167 +49,96 @@ PACKET_PORT_SAMPLE_TYPES = [ # ----- Device --------------------------------------------------------------------------------------------------------- -EP1 = '5610e2c0-8abe-4127-80d0-7c68aff1c19e' -EP2 = '7eb80584-2587-4e71-b10c-f3a5c48e84ab' -EP3 = '368baf47-0540-4ab4-add8-a19b5167162c' -EP100 = '6a923121-36e1-4b5e-8cd6-90aceca9b5cf' - - -DEVICE_R1_NAME = 'R1' -DEVICE_R1_ID = json_device_id(DEVICE_R1_NAME) -DEVICE_R1_EPS = [ - json_endpoint(DEVICE_R1_ID, EP2, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R1_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R1_ID, EP100, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), -] -DEVICE_R1_RULES = [ - json_config_rule_set('dev/rsrc1/value', 'value1'), - json_config_rule_set('dev/rsrc2/value', 'value2'), - json_config_rule_set('dev/rsrc3/value', 'value3'), -] -DEVICE_R1 = json_device_packetrouter_disabled( - DEVICE_R1_NAME, endpoints=DEVICE_R1_EPS, config_rules=DEVICE_R1_RULES) - - -DEVICE_R2_NAME = 'R2' -DEVICE_R2_ID = json_device_id(DEVICE_R2_NAME) -DEVICE_R2_EPS = [ - json_endpoint(DEVICE_R2_ID, EP1, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R2_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R2_ID, EP100, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), -] -DEVICE_R2_RULES = [ - json_config_rule_set('dev/rsrc1/value', 'value4'), - json_config_rule_set('dev/rsrc2/value', 'value5'), - json_config_rule_set('dev/rsrc3/value', 'value6'), -] -DEVICE_R2 = json_device_packetrouter_disabled( - DEVICE_R2_NAME, endpoints=DEVICE_R2_EPS, config_rules=DEVICE_R2_RULES) - - -DEVICE_R3_NAME = 'R3' -DEVICE_R3_ID = json_device_id(DEVICE_R3_NAME) -DEVICE_R3_EPS = [ - json_endpoint(DEVICE_R3_ID, EP2, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R3_ID, EP3, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), - json_endpoint(DEVICE_R3_ID, EP100, '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES), -] -DEVICE_R3_RULES = [ - json_config_rule_set('dev/rsrc1/value', 'value4'), - json_config_rule_set('dev/rsrc2/value', 'value5'), - json_config_rule_set('dev/rsrc3/value', 'value6'), -] -DEVICE_R3 = json_device_packetrouter_disabled( - DEVICE_R3_NAME, endpoints=DEVICE_R3_EPS, config_rules=DEVICE_R3_RULES) +def compose_device(name : str, endpoint_names : List[str]) -> Tuple[str, Dict, Dict]: + device_id = json_device_id(name) + endpoints = [ + json_endpoint(device_id, endpoint_name, 'copper', topology_id=TOPOLOGY_ID, + kpi_sample_types=PACKET_PORT_SAMPLE_TYPES) + for endpoint_name in endpoint_names + ] + config_rules = [ + json_config_rule_set('dev/rsrc1/value', 'value1'), + json_config_rule_set('dev/rsrc2/value', 'value2'), + json_config_rule_set('dev/rsrc3/value', 'value3'), + ] + device = json_device_packetrouter_disabled(name, endpoints=endpoints, config_rules=config_rules) + return name, device_id, device + +DEVICE_R1_NAME, DEVICE_R1_ID, DEVICE_R1 = compose_device('R1', ['1.2', '1.3', '2.2', '2.3']) +DEVICE_R2_NAME, DEVICE_R2_ID, DEVICE_R2 = compose_device('R2', ['1.1', '1.3', '2.1', '2.3']) +DEVICE_R3_NAME, DEVICE_R3_ID, DEVICE_R3 = compose_device('R3', ['1.1', '1.2', '2.1', '2.2']) # ----- Link ----------------------------------------------------------------------------------------------------------- -LINK_R1_R2_UUID = 'c8f92eec-340e-4d31-8d7e-7074927dc889' -LINK_R1_R2_ID = json_link_id(LINK_R1_R2_UUID) -LINK_R1_R2_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, EP2, topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R2_ID, EP1, topology_id=TOPOLOGY_ID), -] -LINK_R1_R2 = json_link(LINK_R1_R2_UUID, LINK_R1_R2_EPIDS) - - -LINK_R2_R3_UUID = 'f9e3539a-d8f9-4737-b4b4-cacf7f90aa5d' -LINK_R2_R3_ID = json_link_id(LINK_R2_R3_UUID) -LINK_R2_R3_EPIDS = [ - json_endpoint_id(DEVICE_R2_ID, EP3, topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, EP2, topology_id=TOPOLOGY_ID), -] -LINK_R2_R3 = json_link(LINK_R2_R3_UUID, LINK_R2_R3_EPIDS) - +def compose_link(name : str, endpoint_ids : List[Tuple[str, str]]) -> Tuple[str, Dict, Dict]: + link_id = json_link_id(name) + endpoint_ids = [ + json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID) + for device_id, endpoint_name in endpoint_ids + ] + link = json_link(name, endpoint_ids) + return name, link_id, link -LINK_R1_R3_UUID = '1f1a988c-47a9-41b2-afd9-ebd6d434a0b4' -LINK_R1_R3_ID = json_link_id(LINK_R1_R3_UUID) -LINK_R1_R3_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, EP3, topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, EP1, topology_id=TOPOLOGY_ID), -] -LINK_R1_R3 = json_link(LINK_R1_R3_UUID, LINK_R1_R3_EPIDS) +LINK_R1_R2_NAME, LINK_R1_R2_ID, LINK_R1_R2 = compose_link('R1==R2', [(DEVICE_R1_ID, '1.2'), (DEVICE_R2_ID, '1.1')]) +LINK_R2_R3_NAME, LINK_R2_R3_ID, LINK_R2_R3 = compose_link('R2==R3', [(DEVICE_R2_ID, '1.3'), (DEVICE_R3_ID, '1.2')]) +LINK_R1_R3_NAME, LINK_R1_R3_ID, LINK_R1_R3 = compose_link('R1==R3', [(DEVICE_R1_ID, '1.3'), (DEVICE_R3_ID, '1.1')]) # ----- Service -------------------------------------------------------------------------------------------------------- -SERVICE_R1_R2_UUID = 'f0432e7b-bb83-4880-9c5d-008c4925ce7d' -SERVICE_R1_R2_ID = json_service_id(SERVICE_R1_R2_UUID, context_id=CONTEXT_ID) -SERVICE_R1_R2_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, EP100, topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R2_ID, EP100, topology_id=TOPOLOGY_ID), -] -SERVICE_R1_R2_CONST = [ - json_constraint_custom('latency[ms]', '15.2'), - json_constraint_custom('jitter[us]', '1.2'), -] -SERVICE_R1_R2_RULES = [ - json_config_rule_set('svc/rsrc1/value', 'value7'), - json_config_rule_set('svc/rsrc2/value', 'value8'), - json_config_rule_set('svc/rsrc3/value', 'value9'), -] -SERVICE_R1_R2 = json_service_l3nm_planned( - SERVICE_R1_R2_UUID, endpoint_ids=SERVICE_R1_R2_EPIDS, constraints=SERVICE_R1_R2_CONST, - config_rules=SERVICE_R1_R2_RULES) - - -SERVICE_R1_R3_UUID = 'fab21cef-542a-4948-bb4a-a0468abfa925' -SERVICE_R1_R3_ID = json_service_id(SERVICE_R1_R3_UUID, context_id=CONTEXT_ID) -SERVICE_R1_R3_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, 'EP100', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, 'EP100', topology_id=TOPOLOGY_ID), -] -SERVICE_R1_R3_CONST = [ - json_constraint_custom('latency[ms]', '5.8'), - json_constraint_custom('jitter[us]', '0.1'), -] -SERVICE_R1_R3_RULES = [ - json_config_rule_set('svc/rsrc1/value', 'value7'), - json_config_rule_set('svc/rsrc2/value', 'value8'), - json_config_rule_set('svc/rsrc3/value', 'value9'), -] -SERVICE_R1_R3 = json_service_l3nm_planned( - SERVICE_R1_R3_UUID, endpoint_ids=SERVICE_R1_R3_EPIDS, constraints=SERVICE_R1_R3_CONST, - config_rules=SERVICE_R1_R3_RULES) - - -SERVICE_R2_R3_UUID = '1f2a808f-62bb-4eaa-94fb-448ed643e61a' -SERVICE_R2_R3_ID = json_service_id(SERVICE_R2_R3_UUID, context_id=CONTEXT_ID) -SERVICE_R2_R3_EPIDS = [ - json_endpoint_id(DEVICE_R2_ID, 'EP100', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, 'EP100', topology_id=TOPOLOGY_ID), -] -SERVICE_R2_R3_CONST = [ - json_constraint_custom('latency[ms]', '23.1'), - json_constraint_custom('jitter[us]', '3.4'), -] -SERVICE_R2_R3_RULES = [ - json_config_rule_set('svc/rsrc1/value', 'value7'), - json_config_rule_set('svc/rsrc2/value', 'value8'), - json_config_rule_set('svc/rsrc3/value', 'value9'), -] -SERVICE_R2_R3 = json_service_l3nm_planned( - SERVICE_R2_R3_UUID, endpoint_ids=SERVICE_R2_R3_EPIDS, constraints=SERVICE_R2_R3_CONST, - config_rules=SERVICE_R2_R3_RULES) +def compose_service( + name : str, endpoint_ids : List[Tuple[str, str]], latency_ms : float, jitter_us : float +) -> Tuple[str, Dict, Dict]: + service_id = json_service_id(name, context_id=CONTEXT_ID) + endpoint_ids = [ + json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID) + for device_id, endpoint_name in endpoint_ids + ] + constraints = [ + json_constraint_custom('latency[ms]', str(latency_ms)), + json_constraint_custom('jitter[us]', str(jitter_us)), + ] + config_rules = [ + json_config_rule_set('svc/rsrc1/value', 'value7'), + json_config_rule_set('svc/rsrc2/value', 'value8'), + json_config_rule_set('svc/rsrc3/value', 'value9'), + ] + service = json_service_l3nm_planned( + name, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) + return name, service_id, service + +SERVICE_R1_R2_NAME, SERVICE_R1_R2_ID, SERVICE_R1_R2 = compose_service( + 'R1-R2', [(DEVICE_R1_ID, '2.2'), (DEVICE_R2_ID, '2.1')], 15.2, 1.2) + +SERVICE_R1_R3_NAME, SERVICE_R1_R3_ID, SERVICE_R1_R3 = compose_service( + 'R1-R3', [(DEVICE_R1_ID, '2.3'), (DEVICE_R3_ID, '2.1')], 5.8, 0.1) + +SERVICE_R2_R3_NAME, SERVICE_R2_R3_ID, SERVICE_R2_R3 = compose_service( + 'R2-R3', [(DEVICE_R2_ID, '2.3'), (DEVICE_R3_ID, '2.2')], 23.1, 3.4) # ----- Connection ----------------------------------------------------------------------------------------------------- -CONNECTION_R1_R3_UUID = 'CON:R1/EP100-R3/EP100' -CONNECTION_R1_R3_ID = json_connection_id(CONNECTION_R1_R3_UUID) -CONNECTION_R1_R3_EPIDS = [ - json_endpoint_id(DEVICE_R1_ID, 'EP100', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R1_ID, 'EP2', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R2_ID, 'EP1', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R2_ID, 'EP3', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, 'EP2', topology_id=TOPOLOGY_ID), - json_endpoint_id(DEVICE_R3_ID, 'EP100', topology_id=TOPOLOGY_ID), -] -CONNECTION_R1_R3_SVCIDS = [SERVICE_R1_R2_ID, SERVICE_R2_R3_ID] -CONNECTION_R1_R3 = json_connection( - CONNECTION_R1_R3_UUID, service_id=SERVICE_R1_R3_ID, path_hops_endpoint_ids=CONNECTION_R1_R3_EPIDS, - sub_service_ids=CONNECTION_R1_R3_SVCIDS) +def compose_connection( + name : str, service_id : Dict, endpoint_ids : List[Tuple[str, str]], sub_service_ids : List[Dict] = [] +) -> Tuple[str, Dict, Dict]: + connection_id = json_connection_id(name) + endpoint_ids = [ + json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID) + for device_id, endpoint_name in endpoint_ids + ] + connection = json_connection( + name, service_id=service_id, path_hops_endpoint_ids=endpoint_ids, sub_service_ids=sub_service_ids) + return name, connection_id, connection + +CONNECTION_R1_R3_NAME, CONNECTION_R1_R3_ID, CONNECTION_R1_R3 = compose_connection( + 'CON:R1/2.3-R3/2.1', SERVICE_R1_R3_ID, [ + (DEVICE_R1_ID, '2.3'), + (DEVICE_R1_ID, '1.2'), (DEVICE_R2_ID, '1.1'), + (DEVICE_R2_ID, '1.3'), (DEVICE_R3_ID, '1.2'), + (DEVICE_R3_ID, '2.1') + ], sub_service_ids=[SERVICE_R1_R2_ID, SERVICE_R2_R3_ID]) # ----- PolicyRule ------------------------------------------------------------------------------------------------------- -POLICY_RULE_UUID = '56380225-3e40-4f74-9162-529f8dcb96a1' -POLICY_RULE_ID = json_policy_rule_id(POLICY_RULE_UUID) -POLICY_RULE = json_policy_rule(POLICY_RULE_UUID) +POLICY_RULE_NAME = '56380225-3e40-4f74-9162-529f8dcb96a1' +POLICY_RULE_ID = json_policy_rule_id(POLICY_RULE_NAME) +POLICY_RULE = json_policy_rule(POLICY_RULE_NAME) diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py index 872c51ccf..8bf4156c5 100644 --- a/src/context/tests/conftest.py +++ b/src/context/tests/conftest.py @@ -48,7 +48,7 @@ def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]: yield _db_engine, _msg_broker _msg_broker.terminate() -RAW_METRICS = None +RAW_METRICS = dict() @pytest.fixture(scope='session') def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name diff --git a/src/context/tests/test_context.py b/src/context/tests/test_context.py index 915989eb7..443d36c92 100644 --- a/src/context/tests/test_context.py +++ b/src/context/tests/test_context.py @@ -15,7 +15,7 @@ import copy, grpc, pytest from common.proto.context_pb2 import Context, ContextId, Empty from context.client.ContextClient import ContextClient -from context.service.database.methods.uuids.Context import context_get_uuid +from context.service.database.uuids.Context import context_get_uuid #from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py index 381b5d4fd..e53ad747c 100644 --- a/src/context/tests/test_device.py +++ b/src/context/tests/test_device.py @@ -16,7 +16,7 @@ import copy, grpc, pytest from common.proto.context_pb2 import ( Context, ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty, Topology, TopologyId) from context.client.ContextClient import ContextClient -from context.service.database.methods.uuids.Device import device_get_uuid +from context.service.database.uuids.Device import device_get_uuid #from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_NAME, TOPOLOGY, TOPOLOGY_ID @@ -88,11 +88,11 @@ def test_device(context_client : ContextClient) -> None: assert response.device_id.device_uuid.uuid == device_uuid assert response.name == DEVICE_R1_NAME assert response.device_type == 'packet-router' - #assert len(response.device_config.config_rules) == 3 + assert len(response.device_config.config_rules) == 3 assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED assert len(response.device_drivers) == 1 assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers - assert len(response.device_endpoints) == 3 + assert len(response.device_endpoints) == 4 # ----- List when the object exists -------------------------------------------------------------------------------- response = context_client.ListDeviceIds(Empty()) @@ -104,11 +104,11 @@ def test_device(context_client : ContextClient) -> None: assert response.devices[0].device_id.device_uuid.uuid == device_uuid assert response.devices[0].name == DEVICE_R1_NAME assert response.devices[0].device_type == 'packet-router' - #assert len(response.devices[0].device_config.config_rules) == 3 + assert len(response.devices[0].device_config.config_rules) == 3 assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED assert len(response.devices[0].device_drivers) == 1 assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers - assert len(response.devices[0].device_endpoints) == 3 + assert len(response.devices[0].device_endpoints) == 4 # ----- Update the object ------------------------------------------------------------------------------------------ new_device_name = 'new' @@ -131,12 +131,12 @@ def test_device(context_client : ContextClient) -> None: assert response.device_id.device_uuid.uuid == device_uuid assert response.name == new_device_name assert response.device_type == 'packet-router' - #assert len(response.device_config.config_rules) == 3 + assert len(response.device_config.config_rules) == 3 assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED assert len(response.device_drivers) == 2 assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.device_drivers assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers - assert len(response.device_endpoints) == 3 + assert len(response.device_endpoints) == 4 # ----- List when the object is modified --------------------------------------------------------------------------- response = context_client.ListDeviceIds(Empty()) @@ -148,12 +148,12 @@ def test_device(context_client : ContextClient) -> None: assert response.devices[0].device_id.device_uuid.uuid == device_uuid assert response.devices[0].name == new_device_name assert response.devices[0].device_type == 'packet-router' - #assert len(response.devices[0].device_config.config_rules) == 3 + assert len(response.devices[0].device_config.config_rules) == 3 assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED assert len(response.devices[0].device_drivers) == 2 assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.devices[0].device_drivers assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers - assert len(response.devices[0].device_endpoints) == 3 + assert len(response.devices[0].device_endpoints) == 4 # ----- Create object relation ------------------------------------------------------------------------------------- #TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) @@ -163,11 +163,11 @@ def test_device(context_client : ContextClient) -> None: #assert response.topology_uuid.uuid == topology_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert response.context_id.context_uuid.uuid == context_uuid - # assert response.topology_uuid.uuid == topology_uuid + #event = events_collector.get_event(block=True) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert response.context_id.context_uuid.uuid == context_uuid + #assert response.topology_uuid.uuid == topology_uuid # ----- Check relation was created --------------------------------------------------------------------------------- response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) @@ -178,7 +178,7 @@ def test_device(context_client : ContextClient) -> None: assert len(response.link_ids) == 0 # ----- Remove the object ------------------------------------------------------------------------------------------ - #context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) @@ -187,15 +187,21 @@ def test_device(context_client : ContextClient) -> None: #assert event.device_id.device_uuid.uuid == device_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- - #response = context_client.ListDeviceIds(Empty()) - #assert len(response.device_ids) == 0 + response = context_client.ListDeviceIds(Empty()) + assert len(response.device_ids) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 - #response = context_client.ListDevices(Empty()) - #assert len(response.devices) == 0 + response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topology_id.topology_uuid.uuid == topology_uuid + assert len(response.device_ids) == 0 + assert len(response.link_ids) == 0 # ----- Clean dependencies used in the test and capture related events --------------------------------------------- - #context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - #context_client.RemoveContext(ContextId(**CONTEXT_ID)) + context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client.RemoveContext(ContextId(**CONTEXT_ID)) #events = events_collector.get_events(block=True, count=2) #assert isinstance(events[0], TopologyEvent) diff --git a/src/context/tests/_test_link.py b/src/context/tests/test_link.py similarity index 51% rename from src/context/tests/_test_link.py rename to src/context/tests/test_link.py index 963fd72cf..ec767f1c9 100644 --- a/src/context/tests/_test_link.py +++ b/src/context/tests/test_link.py @@ -13,172 +13,194 @@ # limitations under the License. import copy, grpc, pytest -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId from context.client.ContextClient import ContextClient #from context.client.EventsCollector import EventsCollector +from context.service.database.uuids.Link import link_get_uuid from .Objects import ( - CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, LINK_R1_R2, - LINK_R1_R2_ID, LINK_R1_R2_UUID, TOPOLOGY, TOPOLOGY_ID) + CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_NAME, + TOPOLOGY, TOPOLOGY_ID) -def grpc_link(context_client_grpc : ContextClient) -> None: +@pytest.mark.depends(on=['context/tests/test_device.py::test_device']) +def test_link(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, + # context_client, log_events_received=True, # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, # activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False, # activate_connection_collector = False) #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID + response = context_client.SetContext(Context(**CONTEXT)) + context_uuid = response.context_uuid.uuid - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + response = context_client.SetTopology(Topology(**TOPOLOGY)) + topology_uuid = response.topology_uuid.uuid - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID + response = context_client.SetDevice(Device(**DEVICE_R1)) + device_r1_uuid = response.device_uuid.uuid - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID + response = context_client.SetDevice(Device(**DEVICE_R2)) + device_r2_uuid = response.device_uuid.uuid # events = events_collector.get_events(block=True, count=4) # assert isinstance(events[0], ContextEvent) # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[0].context_id.context_uuid.uuid == context_uuid # assert isinstance(events[1], TopologyEvent) # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + # assert events[1].topology_id.topology_uuid.uuid == topology_uuid # assert isinstance(events[2], DeviceEvent) # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # assert events[2].device_id.device_uuid.uuid == device_r1_uuid # assert isinstance(events[3], DeviceEvent) # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + # assert events[3].device_id.device_uuid.uuid == device_r2_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- + link_id = LinkId(**LINK_R1_R2_ID) + link_uuid = link_get_uuid(link_id, allow_random=False) with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) + context_client.GetLink(link_id) assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Link({:s}) not found'.format(LINK_R1_R2_UUID) + MSG = 'Link({:s}) not found; link_uuid generated was: {:s}' + assert e.value.details() == MSG.format(LINK_R1_R2_NAME, link_uuid) # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListLinkIds(Empty()) + response = context_client.ListLinkIds(Empty()) assert len(response.link_ids) == 0 - response = context_client_grpc.ListLinks(Empty()) + response = context_client.ListLinks(Empty()) assert len(response.links) == 0 # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) - assert response.link_uuid.uuid == LINK_R1_R2_UUID + response = context_client.SetLink(Link(**LINK_R1_R2)) + assert response.link_uuid.uuid == link_uuid # ----- Check create event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, LinkEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + #assert event.link_id.link_uuid.uuid == link_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) - assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID - assert response.name == '' + response = context_client.GetLink(LinkId(**LINK_R1_R2_ID)) + assert response.link_id.link_uuid.uuid == link_uuid + assert response.name == LINK_R1_R2_NAME assert len(response.link_endpoint_ids) == 2 # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListLinkIds(Empty()) + response = context_client.ListLinkIds(Empty()) assert len(response.link_ids) == 1 - assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + assert response.link_ids[0].link_uuid.uuid == link_uuid - response = context_client_grpc.ListLinks(Empty()) + response = context_client.ListLinks(Empty()) assert len(response.links) == 1 - assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID - assert response.links[0].name == '' + assert response.links[0].link_id.link_uuid.uuid == link_uuid + assert response.links[0].name == LINK_R1_R2_NAME assert len(response.links[0].link_endpoint_ids) == 2 # ----- Update the object ------------------------------------------------------------------------------------------ - new_link_name = 'l1' + new_link_name = 'new' LINK_UPDATED = copy.deepcopy(LINK_R1_R2) LINK_UPDATED['name'] = new_link_name - response = context_client_grpc.SetLink(Link(**LINK_UPDATED)) - assert response.link_uuid.uuid == LINK_R1_R2_UUID + response = context_client.SetLink(Link(**LINK_UPDATED)) + assert response.link_uuid.uuid == link_uuid # ----- Check update event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, LinkEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID + #assert event.link_id.link_uuid.uuid == link_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) - assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID + response = context_client.GetLink(LinkId(**LINK_R1_R2_ID)) + assert response.link_id.link_uuid.uuid == link_uuid assert response.name == new_link_name assert len(response.link_endpoint_ids) == 2 # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListLinkIds(Empty()) + response = context_client.ListLinkIds(Empty()) assert len(response.link_ids) == 1 - assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + assert response.link_ids[0].link_uuid.uuid == link_uuid - response = context_client_grpc.ListLinks(Empty()) + response = context_client.ListLinks(Empty()) assert len(response.links) == 1 - assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + assert response.links[0].link_id.link_uuid.uuid == link_uuid assert response.links[0].name == new_link_name assert len(response.links[0].link_endpoint_ids) == 2 # ----- Create object relation ------------------------------------------------------------------------------------- - TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID) - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_LINK)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY) + #TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID) + #response = context_client.SetTopology(Topology(**TOPOLOGY_WITH_LINK)) + #assert response.context_id.context_uuid.uuid == context_uuid + #assert response.topology_uuid.uuid == topology_uuid # ----- Check update event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, TopologyEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert response.context_id.context_uuid.uuid == context_uuid + #assert response.topology_uuid.uuid == topology_uuid # ----- Check relation was created --------------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topology_id.topology_uuid.uuid == topology_uuid assert len(response.device_ids) == 2 - assert response.device_ids[0].device_uuid.uuid in {DEVICE_R1_UUID, DEVICE_R2_UUID} - assert response.device_ids[1].device_uuid.uuid in {DEVICE_R1_UUID, DEVICE_R2_UUID} + assert response.device_ids[0].device_uuid.uuid in {device_r1_uuid, device_r2_uuid} + assert response.device_ids[1].device_uuid.uuid in {device_r1_uuid, device_r2_uuid} assert len(response.link_ids) == 1 - assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID + assert response.link_ids[0].link_uuid.uuid == link_uuid # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + context_client.RemoveLink(LinkId(**LINK_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #events = events_collector.get_events(block=True, count=5) - #assert isinstance(events[0], LinkEvent) + #event = events_collector.get_event(block=True) + #assert isinstance(event, LinkEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.link_id.link_uuid.uuid == link_uuid + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client.ListLinkIds(Empty()) + assert len(response.link_ids) == 0 + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 + + response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) + assert response.topology_id.context_id.context_uuid.uuid == context_uuid + assert response.topology_id.topology_uuid.uuid == topology_uuid + assert len(response.device_ids) == 2 + assert response.device_ids[0].device_uuid.uuid in {device_r1_uuid, device_r2_uuid} + assert response.device_ids[1].device_uuid.uuid in {device_r1_uuid, device_r2_uuid} + assert len(response.link_ids) == 0 + + # ----- Clean dependencies used in the test and capture related events --------------------------------------------- + context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client.RemoveContext(ContextId(**CONTEXT_ID)) + + #events = events_collector.get_events(block=True, count=4) + #assert isinstance(events[0], DeviceEvent) #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID + #assert events[0].device_id.device_uuid.uuid == device_r1_uuid #assert isinstance(events[1], DeviceEvent) #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID - #assert isinstance(events[2], DeviceEvent) + #assert events[1].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[2], TopologyEvent) #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID - #assert isinstance(events[3], TopologyEvent) + #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[2].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[3], ContextEvent) #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - #assert isinstance(events[4], ContextEvent) - #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[3].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- #events_collector.stop() diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 142887d09..51b224007 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -15,7 +15,7 @@ import copy, grpc, pytest from common.proto.context_pb2 import Context, ContextId, Topology, TopologyId from context.client.ContextClient import ContextClient -from context.service.database.methods.uuids.Topology import topology_get_uuid +from context.service.database.uuids.Topology import topology_get_uuid #from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, TOPOLOGY_NAME diff --git a/test-context.sh b/test-context.sh index 7ad303ca9..79a9d5653 100755 --- a/test-context.sh +++ b/test-context.sh @@ -44,7 +44,8 @@ coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --ma context/tests/test_hasher.py \ context/tests/test_context.py \ context/tests/test_topology.py \ - context/tests/test_device.py + context/tests/test_device.py \ + context/tests/test_link.py echo echo "Coverage report:" -- GitLab From 89fa7f98f76786c9b7eedff1aa4c49fa71012fd8 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 5 Jan 2023 18:43:35 +0000 Subject: [PATCH 025/158] Context component: - corrected ConfigRuleModel and methods - corrected ConstraintModel and methods - corrected ServiceModel and methods - corrected ServiceEndPointModel - added missing non-null constraints - removed redundant column definition data - removed unneeded lazy loading parameters - added Service UUID generator - implemented unitary test for Service entity --- .../service/ContextServiceServicerImpl.py | 44 +- src/context/service/database/ConfigRule.py | 185 +++++ src/context/service/database/Constraint.py | 110 +++ src/context/service/database/Device.py | 161 +--- src/context/service/database/Link.py | 4 - src/context/service/database/Service.py | 261 ++----- src/context/service/database/Topology.py | 43 +- .../database/models/ConfigRuleModel.py | 8 +- .../database/models/ConstraintModel.py | 720 +++++++++--------- .../service/database/models/ContextModel.py | 6 +- .../service/database/models/DeviceModel.py | 6 +- .../service/database/models/EndPointModel.py | 8 +- .../service/database/models/LinkModel.py | 2 +- .../service/database/models/RelationModels.py | 35 +- .../service/database/models/ServiceModel.py | 19 +- .../service/database/models/TopologyModel.py | 2 +- src/context/service/database/uuids/Service.py | 37 + .../{_test_service.py => test_service.py} | 181 +++-- src/context/tests/test_topology.py | 3 +- test-context.sh | 9 +- 20 files changed, 940 insertions(+), 904 deletions(-) create mode 100644 src/context/service/database/ConfigRule.py create mode 100644 src/context/service/database/Constraint.py create mode 100644 src/context/service/database/uuids/Service.py rename src/context/tests/{_test_service.py => test_service.py} (58%) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 6914e05a0..edb5095b9 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -38,7 +38,7 @@ from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set -#from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set +from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set from .database.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set #from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string #from context.service.Database import Database @@ -231,31 +231,31 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Service ---------------------------------------------------------------------------------------------------- -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList: -# return service_list_ids(self.db_engine, request) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList: + return service_list_ids(self.db_engine, request) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: -# return service_list_objs(self.db_engine, request) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: + return service_list_objs(self.db_engine, request) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service: -# return service_get(self.db_engine, request) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service: + return service_get(self.db_engine, request) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: -# service_id,updated = service_set(self.db_engine, request) -# #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# #notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) -# return service_id + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: + service_id,updated = service_set(self.db_engine, request) + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) + return service_id -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: -# deleted = service_delete(self.db_engine, request) -# #if deleted: -# # notify_event(self.messagebroker, TOPIC_SERVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'service_id': request}) -# return Empty() + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: + deleted = service_delete(self.db_engine, request) + #if deleted: + # notify_event(self.messagebroker, TOPIC_SERVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'service_id': request}) + return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py new file mode 100644 index 000000000..af1dd1ec5 --- /dev/null +++ b/src/context/service/database/ConfigRule.py @@ -0,0 +1,185 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import delete +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.orm import Session +from typing import Dict, List, Optional +from common.proto.context_pb2 import ConfigRule +from common.tools.grpc.Tools import grpc_message_to_json_string +from .models.enums.ConfigAction import grpc_to_enum__config_action +from .models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel +from .uuids._Builder import get_uuid_random + +def compose_config_rules_data( + config_rules : List[ConfigRule], + device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None +) -> List[Dict]: + dict_config_rules : List[Dict] = list() + for position,config_rule in enumerate(config_rules): + configrule_uuid = get_uuid_random() + str_kind = config_rule.WhichOneof('config_rule') + dict_config_rule = { + 'configrule_uuid': configrule_uuid, + 'position' : position, + 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member + 'action' : grpc_to_enum__config_action(config_rule.action), + 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), + } + if device_uuid is not None: dict_config_rule['device_uuid' ] = device_uuid + if service_uuid is not None: dict_config_rule['service_uuid'] = service_uuid + if slice_uuid is not None: dict_config_rule['slice_uuid' ] = slice_uuid + dict_config_rules.append(dict_config_rule) + return dict_config_rules + +def upsert_config_rules( + session : Session, config_rules : List[Dict], + device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None +) -> None: + stmt = delete(ConfigRuleModel) + if device_uuid is not None: stmt = stmt.where(ConfigRuleModel.device_uuid == device_uuid ) + if service_uuid is not None: stmt = stmt.where(ConfigRuleModel.service_uuid == service_uuid) + if slice_uuid is not None: stmt = stmt.where(ConfigRuleModel.slice_uuid == slice_uuid ) + session.execute(stmt) + session.execute(insert(ConfigRuleModel).values(config_rules)) + + +#Union_SpecificConfigRule = Union[ +# ConfigRuleCustomModel, ConfigRuleAclModel +#] +# +#def set_config_rule( +# database : Database, db_config : ConfigModel, position : int, resource_key : str, resource_value : str, +#): # -> Tuple[ConfigRuleModel, bool]: +# +# str_rule_key_hash = fast_hasher(resource_key) +# str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') +# +# data = {'config_fk': db_config, 'position': position, 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, +# 'value': resource_value} +# to_add = ConfigRuleModel(**data) +# +# result = database.create_or_update(to_add) +# return result +#Tuple_ConfigRuleSpecs = Tuple[Type, str, Dict, ConfigRuleKindEnum] +# +#def parse_config_rule_custom(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: +# config_rule_class = ConfigRuleCustomModel +# str_config_rule_id = grpc_config_rule.custom.resource_key +# config_rule_data = { +# 'key' : grpc_config_rule.custom.resource_key, +# 'value': grpc_config_rule.custom.resource_value, +# } +# return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.CUSTOM +# +#def parse_config_rule_acl(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: +# config_rule_class = ConfigRuleAclModel +# grpc_endpoint_id = grpc_config_rule.acl.endpoint_id +# grpc_rule_set = grpc_config_rule.acl.rule_set +# device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid +# endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid +# str_endpoint_key = '/'.join([device_uuid, endpoint_uuid]) +# #str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) +# str_config_rule_id = ':'.join([str_endpoint_key, grpc_rule_set.name]) +# config_rule_data = { +# #'endpoint_fk': db_endpoint, +# 'endpoint_id': grpc_message_to_json_string(grpc_endpoint_id), +# 'acl_data': grpc_message_to_json_string(grpc_rule_set), +# } +# return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.ACL +# +#CONFIGRULE_PARSERS = { +# 'custom': parse_config_rule_custom, +# 'acl' : parse_config_rule_acl, +#} +# +#Union_ConfigRuleModel = Union[ +# ConfigRuleCustomModel, ConfigRuleAclModel, +#] +# +#def set_config_rule( +# database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule, position : int +#) -> Tuple[Union_ConfigRuleModel, bool]: +# grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) +# parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) +# if parser is None: +# raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( +# grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) +# +# # create specific ConfigRule +# config_rule_class, str_config_rule_id, config_rule_data, config_rule_kind = parser(database, grpc_config_rule) +# str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) +# str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') +# result : Tuple[Union_ConfigRuleModel, bool] = update_or_create_object( +# database, config_rule_class, str_config_rule_key, config_rule_data) +# db_specific_config_rule, updated = result +# +# # create generic ConfigRule +# config_rule_fk_field_name = 'config_rule_{:s}_fk'.format(config_rule_kind.value) +# config_rule_data = { +# 'config_fk': db_config, 'kind': config_rule_kind, 'position': position, +# 'action': ORM_ConfigActionEnum.SET, +# config_rule_fk_field_name: db_specific_config_rule +# } +# result : Tuple[ConfigRuleModel, bool] = update_or_create_object( +# database, ConfigRuleModel, str_config_rule_key, config_rule_data) +# db_config_rule, updated = result +# +# return db_config_rule, updated +# +#def delete_config_rule( +# database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule +#) -> None: +# grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) +# parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) +# if parser is None: +# raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( +# grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) +# +# # delete generic config rules; self deletes specific config rule +# _, str_config_rule_id, _, config_rule_kind = parser(database, grpc_config_rule) +# str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) +# str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') +# db_config_rule : Optional[ConfigRuleModel] = get_object( +# database, ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) +# if db_config_rule is None: return +# db_config_rule.delete() +# +#def update_config( +# database : Database, db_parent_pk : str, config_name : str, grpc_config_rules +#) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: +# +# str_config_key = key_to_str([config_name, db_parent_pk], separator=':') +# result : Tuple[ConfigModel, bool] = get_or_create_object(database, ConfigModel, str_config_key) +# db_config, created = result +# +# db_objects = [(db_config, created)] +# +# for position,grpc_config_rule in enumerate(grpc_config_rules): +# action = grpc_to_enum__config_action(grpc_config_rule.action) +# +# if action == ORM_ConfigActionEnum.SET: +# result : Tuple[ConfigRuleModel, bool] = set_config_rule( +# database, db_config, grpc_config_rule, position) +# db_config_rule, updated = result +# db_objects.append((db_config_rule, updated)) +# elif action == ORM_ConfigActionEnum.DELETE: +# delete_config_rule(database, db_config, grpc_config_rule) +# else: +# msg = 'Unsupported Action({:s}) for ConfigRule({:s})' +# str_action = str(ConfigActionEnum.Name(action)) +# str_config_rule = grpc_message_to_json_string(grpc_config_rule) +# raise AttributeError(msg.format(str_action, str_config_rule)) +# +# return db_objects diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py new file mode 100644 index 000000000..5c94d13c0 --- /dev/null +++ b/src/context/service/database/Constraint.py @@ -0,0 +1,110 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import delete +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.orm import Session +from typing import Dict, List, Optional +from common.proto.context_pb2 import Constraint +from common.tools.grpc.Tools import grpc_message_to_json_string +from .models.ConstraintModel import ConstraintKindEnum, ConstraintModel +from .uuids._Builder import get_uuid_random + +def compose_constraints_data( + constraints : List[Constraint], + service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None +) -> List[Dict]: + dict_constraints : List[Dict] = list() + for position,constraint in enumerate(constraints): + str_kind = constraint.WhichOneof('constraint') + dict_constraint = { + 'constraint_uuid': get_uuid_random(), + 'position' : position, + 'kind' : ConstraintKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member + 'data' : grpc_message_to_json_string(getattr(constraint, str_kind, {})), + } + if service_uuid is not None: dict_constraint['service_uuid'] = service_uuid + if slice_uuid is not None: dict_constraint['slice_uuid' ] = slice_uuid + dict_constraints.append(dict_constraint) + return dict_constraints + +def upsert_constraints( + session : Session, constraints : List[Dict], + service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None +) -> None: + stmt = delete(ConstraintModel) + if service_uuid is not None: stmt = stmt.where(ConstraintModel.service_uuid == service_uuid) + if slice_uuid is not None: stmt = stmt.where(ConstraintModel.slice_uuid == slice_uuid ) + session.execute(stmt) + session.execute(insert(ConstraintModel).values(constraints)) + +# def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int +# ) -> Tuple[Union_ConstraintModel, bool]: +# with self.session() as session: +# +# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) +# +# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) +# if parser is None: +# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( +# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) +# +# # create specific constraint +# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) +# str_constraint_id = str(uuid.uuid4()) +# LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) +# # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) +# # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') +# +# # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( +# # database, constraint_class, str_constraint_key, constraint_data) +# constraint_data[constraint_class.main_pk_name()] = str_constraint_id +# db_new_constraint = constraint_class(**constraint_data) +# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) +# db_specific_constraint, updated = result +# +# # create generic constraint +# # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) +# constraint_data = { +# 'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind +# } +# +# db_new_constraint = ConstraintModel(**constraint_data) +# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) +# db_constraint, updated = result +# +# return db_constraint, updated +# +# def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints +# ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: +# with self.session() as session: +# # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') +# # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) +# result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() +# created = None +# if result: +# created = True +# session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() +# db_constraints = ConstraintsModel(constraints_uuid=service_uuid) +# session.add(db_constraints) +# +# db_objects = [(db_constraints, created)] +# +# for position,grpc_constraint in enumerate(grpc_constraints): +# result : Tuple[ConstraintModel, bool] = self.set_constraint( +# db_constraints, grpc_constraint, position) +# db_constraint, updated = result +# db_objects.append((db_constraint, updated)) +# +# return db_objects diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index a0e0a53e5..7607a2349 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -12,25 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -from sqlalchemy import delete from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from typing import Dict, List, Optional, Set, Tuple +from typing import Dict, List, Optional, Set from common.proto.context_pb2 import Device, DeviceId, DeviceIdList, DeviceList from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Device import json_device_id -from common.tools.grpc.Tools import grpc_message_to_json_string -from .models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel +from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules from .models.DeviceModel import DeviceModel from .models.EndPointModel import EndPointModel from .models.RelationModels import TopologyDeviceModel -from .models.enums.ConfigAction import grpc_to_enum__config_action from .models.enums.DeviceDriver import grpc_to_enum__device_driver from .models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status from .models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type -from .uuids._Builder import get_uuid_random from .uuids.Device import device_get_uuid from .uuids.EndPoint import endpoint_get_uuid @@ -108,18 +104,7 @@ def device_set(db_engine : Engine, request : Device) -> bool: }) topology_uuids.add(endpoint_topology_uuid) - config_rules : List[Dict] = list() - for position,config_rule in enumerate(request.device_config.config_rules): - configrule_uuid = get_uuid_random() - str_kind = config_rule.WhichOneof('config_rule') - config_rules.append({ - 'configrule_uuid': configrule_uuid, - 'device_uuid' : device_uuid, - 'position' : position, - 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member - 'action' : grpc_to_enum__config_action(config_rule.action), - 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), - }) + config_rules = compose_config_rules_data(request.device_config.config_rules, device_uuid=device_uuid) device_data = [{ 'device_uuid' : device_uuid, @@ -157,8 +142,7 @@ def device_set(db_engine : Engine, request : Device) -> bool: index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] )) - session.execute(delete(ConfigRuleModel).where(ConfigRuleModel.device_uuid == device_uuid)) - session.execute(insert(ConfigRuleModel).values(config_rules)) + upsert_config_rules(session, config_rules, device_uuid=device_uuid) run_transaction(sessionmaker(bind=db_engine), callback) updated = False # TODO: improve and check if created/updated @@ -167,143 +151,6 @@ def device_set(db_engine : Engine, request : Device) -> bool: def device_delete(db_engine : Engine, request : DeviceId) -> bool: device_uuid = device_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: - #session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete() num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() - #db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() - #session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete() - #session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete() - #session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() return num_deleted > 0 return run_transaction(sessionmaker(bind=db_engine), callback) - - - - -#Union_SpecificConfigRule = Union[ -# ConfigRuleCustomModel, ConfigRuleAclModel -#] -# -#def set_config_rule( -# database : Database, db_config : ConfigModel, position : int, resource_key : str, resource_value : str, -#): # -> Tuple[ConfigRuleModel, bool]: -# -# str_rule_key_hash = fast_hasher(resource_key) -# str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':') -# -# data = {'config_fk': db_config, 'position': position, 'action': ORM_ConfigActionEnum.SET, 'key': resource_key, -# 'value': resource_value} -# to_add = ConfigRuleModel(**data) -# -# result = database.create_or_update(to_add) -# return result -#Tuple_ConfigRuleSpecs = Tuple[Type, str, Dict, ConfigRuleKindEnum] -# -#def parse_config_rule_custom(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: -# config_rule_class = ConfigRuleCustomModel -# str_config_rule_id = grpc_config_rule.custom.resource_key -# config_rule_data = { -# 'key' : grpc_config_rule.custom.resource_key, -# 'value': grpc_config_rule.custom.resource_value, -# } -# return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.CUSTOM -# -#def parse_config_rule_acl(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs: -# config_rule_class = ConfigRuleAclModel -# grpc_endpoint_id = grpc_config_rule.acl.endpoint_id -# grpc_rule_set = grpc_config_rule.acl.rule_set -# device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid -# endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid -# str_endpoint_key = '/'.join([device_uuid, endpoint_uuid]) -# #str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) -# str_config_rule_id = ':'.join([str_endpoint_key, grpc_rule_set.name]) -# config_rule_data = { -# #'endpoint_fk': db_endpoint, -# 'endpoint_id': grpc_message_to_json_string(grpc_endpoint_id), -# 'acl_data': grpc_message_to_json_string(grpc_rule_set), -# } -# return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.ACL -# -#CONFIGRULE_PARSERS = { -# 'custom': parse_config_rule_custom, -# 'acl' : parse_config_rule_acl, -#} -# -#Union_ConfigRuleModel = Union[ -# ConfigRuleCustomModel, ConfigRuleAclModel, -#] -# -#def set_config_rule( -# database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule, position : int -#) -> Tuple[Union_ConfigRuleModel, bool]: -# grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) -# parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) -# if parser is None: -# raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( -# grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) -# -# # create specific ConfigRule -# config_rule_class, str_config_rule_id, config_rule_data, config_rule_kind = parser(database, grpc_config_rule) -# str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) -# str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') -# result : Tuple[Union_ConfigRuleModel, bool] = update_or_create_object( -# database, config_rule_class, str_config_rule_key, config_rule_data) -# db_specific_config_rule, updated = result -# -# # create generic ConfigRule -# config_rule_fk_field_name = 'config_rule_{:s}_fk'.format(config_rule_kind.value) -# config_rule_data = { -# 'config_fk': db_config, 'kind': config_rule_kind, 'position': position, -# 'action': ORM_ConfigActionEnum.SET, -# config_rule_fk_field_name: db_specific_config_rule -# } -# result : Tuple[ConfigRuleModel, bool] = update_or_create_object( -# database, ConfigRuleModel, str_config_rule_key, config_rule_data) -# db_config_rule, updated = result -# -# return db_config_rule, updated -# -#def delete_config_rule( -# database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule -#) -> None: -# grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule')) -# parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind) -# if parser is None: -# raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format( -# grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule))) -# -# # delete generic config rules; self deletes specific config rule -# _, str_config_rule_id, _, config_rule_kind = parser(database, grpc_config_rule) -# str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id])) -# str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':') -# db_config_rule : Optional[ConfigRuleModel] = get_object( -# database, ConfigRuleModel, str_config_rule_key, raise_if_not_found=False) -# if db_config_rule is None: return -# db_config_rule.delete() -# -#def update_config( -# database : Database, db_parent_pk : str, config_name : str, grpc_config_rules -#) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: -# -# str_config_key = key_to_str([config_name, db_parent_pk], separator=':') -# result : Tuple[ConfigModel, bool] = get_or_create_object(database, ConfigModel, str_config_key) -# db_config, created = result -# -# db_objects = [(db_config, created)] -# -# for position,grpc_config_rule in enumerate(grpc_config_rules): -# action = grpc_to_enum__config_action(grpc_config_rule.action) -# -# if action == ORM_ConfigActionEnum.SET: -# result : Tuple[ConfigRuleModel, bool] = set_config_rule( -# database, db_config, grpc_config_rule, position) -# db_config_rule, updated = result -# db_objects.append((db_config_rule, updated)) -# elif action == ORM_ConfigActionEnum.DELETE: -# delete_config_rule(database, db_config, grpc_config_rule) -# else: -# msg = 'Unsupported Action({:s}) for ConfigRule({:s})' -# str_action = str(ConfigActionEnum.Name(action)) -# str_config_rule = grpc_message_to_json_string(grpc_config_rule) -# raise AttributeError(msg.format(str_action, str_config_rule)) -# -# return db_objects diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index 93f90b3ea..9f11cad23 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -108,10 +108,6 @@ def link_set(db_engine : Engine, request : Link) -> bool: def link_delete(db_engine : Engine, request : LinkId) -> bool: link_uuid = link_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: - #session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete() - #session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete() num_deleted = session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() - #db_link = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() - #session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() return num_deleted > 0 return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index 3b6b4cc26..7e3d9d044 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker @@ -20,10 +19,20 @@ from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceIdList, ServiceList from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Service import json_service_id +from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules +from context.service.database.Constraint import compose_constraints_data, upsert_constraints +from .models.enums.ServiceStatus import grpc_to_enum__service_status +from .models.enums.ServiceType import grpc_to_enum__service_type +from .models.RelationModels import ServiceEndPointModel from .models.ServiceModel import ServiceModel +from .uuids.Context import context_get_uuid +from .uuids.EndPoint import endpoint_get_uuid +from .uuids.Service import service_get_uuid def service_list_ids(db_engine : Engine, request : ContextId) -> ServiceIdList: - context_uuid = request.context_uuid.uuid + context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() #.options(selectinload(ContextModel.service)).filter_by(context_uuid=context_uuid).one_or_none() @@ -31,7 +40,7 @@ def service_list_ids(db_engine : Engine, request : ContextId) -> ServiceIdList: return ServiceIdList(service_ids=run_transaction(sessionmaker(bind=db_engine), callback)) def service_list_objs(db_engine : Engine, request : ContextId) -> ServiceList: - context_uuid = request.context_uuid.uuid + context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() #.options(selectinload(ContextModel.service)).filter_by(context_uuid=context_uuid).one_or_none() @@ -39,225 +48,87 @@ def service_list_objs(db_engine : Engine, request : ContextId) -> ServiceList: return ServiceList(services=run_transaction(sessionmaker(bind=db_engine), callback)) def service_get(db_engine : Engine, request : ServiceId) -> Service: - context_uuid = request.context_id.context_uuid.uuid - service_uuid = request.service_uuid.uuid - + _,service_uuid = service_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[ServiceModel] = session.query(ServiceModel)\ - .filter_by(context_uuid=context_uuid, service_uuid=service_uuid).one_or_none() + .filter_by(service_uuid=service_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) if obj is None: - obj_uuid = '{:s}/{:s}'.format(context_uuid, service_uuid) - raise NotFoundException('Service', obj_uuid) + context_uuid = context_get_uuid(request.context_id, allow_random=False) + raw_service_uuid = '{:s}/{:s}'.format(request.context_id.context_uuid.uuid, request.service_uuid.uuid) + raise NotFoundException('Service', raw_service_uuid, extra_details=[ + 'context_uuid generated was: {:s}'.format(context_uuid), + 'service_uuid generated was: {:s}'.format(service_uuid), + ]) return Service(**obj) def service_set(db_engine : Engine, request : Service) -> bool: - context_uuid = request.service_id.context_id.context_uuid.uuid - service_uuid = request.service_id.service_uuid.uuid - service_name = request.name + raw_context_uuid = request.service_id.context_id.context_uuid.uuid + raw_service_uuid = request.service_id.service_uuid.uuid + raw_service_name = request.name + service_name = raw_service_uuid if len(raw_service_name) == 0 else raw_service_name + context_uuid,service_uuid = service_get_uuid(request.service_id, service_name=service_name, allow_random=True) + + service_type = grpc_to_enum__service_type(request.service_type) + service_status = grpc_to_enum__service_status(request.service_status.service_status) + service_endpoints_data : List[Dict] = list() for i,endpoint_id in enumerate(request.service_endpoint_ids): endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - if len(endpoint_context_uuid) > 0 and context_uuid != endpoint_context_uuid: + if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid + if endpoint_context_uuid not in {raw_context_uuid, context_uuid}: raise InvalidArgumentException( 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), endpoint_context_uuid, - ['should be == {:s}({:s})'.format('request.service_id.context_id.context_uuid.uuid', context_uuid)]) + ['should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(raw_context_uuid)]) + + _, _, endpoint_uuid = endpoint_get_uuid(endpoint_id, allow_random=False) + service_endpoints_data.append({ + 'service_uuid' : service_uuid, + 'endpoint_uuid': endpoint_uuid, + }) + constraints = compose_constraints_data(request.service_constraints, service_uuid=service_uuid) + config_rules = compose_config_rules_data(request.service_config.config_rules, service_uuid=service_uuid) + + service_data = [{ + 'context_uuid' : context_uuid, + 'service_uuid' : service_uuid, + 'service_name' : service_name, + 'service_type' : service_type, + 'service_status': service_status, + }] def callback(session : Session) -> None: - service_data = [{ - 'context_uuid' : context_uuid, - 'service_uuid': service_uuid, - 'service_name': service_name, - 'created_at' : time.time(), - }] stmt = insert(ServiceModel).values(service_data) stmt = stmt.on_conflict_do_update( - index_elements=[ServiceModel.context_uuid, ServiceModel.service_uuid], - set_=dict(service_name = stmt.excluded.service_name) + index_elements=[ServiceModel.service_uuid], + set_=dict( + service_name = stmt.excluded.service_name, + service_type = stmt.excluded.service_type, + service_status = stmt.excluded.service_status, + ) ) session.execute(stmt) - run_transaction(sessionmaker(bind=db_engine), callback) - return False # TODO: improve and check if created/updated - + stmt = insert(ServiceEndPointModel).values(service_endpoints_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[ServiceEndPointModel.service_uuid, ServiceEndPointModel.endpoint_uuid] + ) + session.execute(stmt) -# # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) -# db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() -# # str_service_key = key_to_str([context_uuid, service_uuid]) -# constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints) -# db_constraints = constraints_result[0][0] -# -# config_rules = grpc_config_rules_to_raw(request.service_config.config_rules) -# running_config_result = update_config(self.database, str_service_key, 'running', config_rules) -# db_running_config = running_config_result[0][0] -# -# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { -# 'context_fk' : db_context, -# 'service_uuid' : service_uuid, -# 'service_type' : grpc_to_enum__service_type(request.service_type), -# 'service_constraints_fk': db_constraints, -# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), -# 'service_config_fk' : db_running_config, -# }) -# db_service, updated = result -# -# for i,endpoint_id in enumerate(request.service_endpoint_ids): -# endpoint_uuid = endpoint_id.endpoint_uuid.uuid -# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid -# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# -# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) -# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') -# -# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) -# -# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') -# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( -# self.database, ServiceEndPointModel, str_service_endpoint_key, { -# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) -# #db_service_endpoint, service_endpoint_created = result -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_service_id = db_service.dump_id() -# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) -# return ServiceId(**dict_service_id) -# context_uuid = request.service_id.context_id.context_uuid.uuid -# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) -# -# for i,endpoint_id in enumerate(request.service_endpoint_ids): -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: -# raise InvalidArgumentException( -# 'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), -# endpoint_topology_context_uuid, -# ['should be == {:s}({:s})'.format( -# 'request.service_id.context_id.context_uuid.uuid', context_uuid)]) -# -# service_uuid = request.service_id.service_uuid.uuid -# str_service_key = key_to_str([context_uuid, service_uuid]) -# -# constraints_result = set_constraints( -# self.database, str_service_key, 'service', request.service_constraints) -# db_constraints = constraints_result[0][0] -# -# running_config_rules = update_config( -# self.database, str_service_key, 'service', request.service_config.config_rules) -# db_running_config = running_config_rules[0][0] -# -# result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, { -# 'context_fk' : db_context, -# 'service_uuid' : service_uuid, -# 'service_type' : grpc_to_enum__service_type(request.service_type), -# 'service_constraints_fk': db_constraints, -# 'service_status' : grpc_to_enum__service_status(request.service_status.service_status), -# 'service_config_fk' : db_running_config, -# }) -# db_service, updated = result -# -# for i,endpoint_id in enumerate(request.service_endpoint_ids): -# endpoint_uuid = endpoint_id.endpoint_uuid.uuid -# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid -# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# -# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) -# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') -# -# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) -# -# str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--') -# result : Tuple[ServiceEndPointModel, bool] = get_or_create_object( -# self.database, ServiceEndPointModel, str_service_endpoint_key, { -# 'service_fk': db_service, 'endpoint_fk': db_endpoint}) -# #db_service_endpoint, service_endpoint_created = result -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_service_id = db_service.dump_id() -# notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id}) -# return ServiceId(**dict_service_id) + upsert_constraints(session, constraints, service_uuid=service_uuid) + upsert_config_rules(session, config_rules, service_uuid=service_uuid) + run_transaction(sessionmaker(bind=db_engine), callback) + updated = False # TODO: improve and check if created/updated + return ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))),updated -# def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int -# ) -> Tuple[Union_ConstraintModel, bool]: -# with self.session() as session: -# -# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) -# -# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) -# if parser is None: -# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( -# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) -# -# # create specific constraint -# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint) -# str_constraint_id = str(uuid.uuid4()) -# LOGGER.info('str_constraint_id: {}'.format(str_constraint_id)) -# # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) -# # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') -# -# # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( -# # database, constraint_class, str_constraint_key, constraint_data) -# constraint_data[constraint_class.main_pk_name()] = str_constraint_id -# db_new_constraint = constraint_class(**constraint_data) -# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) -# db_specific_constraint, updated = result -# -# # create generic constraint -# # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value) -# constraint_data = { -# 'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind -# } -# -# db_new_constraint = ConstraintModel(**constraint_data) -# result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint) -# db_constraint, updated = result -# -# return db_constraint, updated -# -# def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints -# ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: -# with self.session() as session: -# # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') -# # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) -# result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() -# created = None -# if result: -# created = True -# session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none() -# db_constraints = ConstraintsModel(constraints_uuid=service_uuid) -# session.add(db_constraints) -# -# db_objects = [(db_constraints, created)] -# -# for position,grpc_constraint in enumerate(grpc_constraints): -# result : Tuple[ConstraintModel, bool] = self.set_constraint( -# db_constraints, grpc_constraint, position) -# db_constraint, updated = result -# db_objects.append((db_constraint, updated)) -# -# return db_objects def service_delete(db_engine : Engine, request : ServiceId) -> bool: - context_uuid = request.context_id.context_uuid.uuid - service_uuid = request.service_uuid.uuid + _,service_uuid = service_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: - num_deleted = session.query(ServiceModel)\ - .filter_by(context_uuid=context_uuid, service_uuid=service_uuid).delete() + num_deleted = session.query(ServiceModel).filter_by(service_uuid=service_uuid).delete() return num_deleted > 0 return run_transaction(sessionmaker(bind=db_engine), callback) - - # def delete(self) -> None: - # from .RelationModels import ServiceEndPointModel - # for db_service_endpoint_pk,_ in self.references(ServiceEndPointModel): - # ServiceEndPointModel(self.database, db_service_endpoint_pk).delete() - # super().delete() - # ConfigModel(self.database, self.service_config_fk).delete() - # ConstraintsModel(self.database, self.service_constraints_fk).delete() diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py index 25fa02f4b..ae8d0a8bd 100644 --- a/src/context/service/database/Topology.py +++ b/src/context/service/database/Topology.py @@ -12,20 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from typing import Dict, List, Optional, Set +from typing import Dict, List, Optional from common.proto.context_pb2 import ContextId, Topology, TopologyId, TopologyIdList, TopologyList -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException, NotFoundException +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Topology import json_topology_id -#from .models.RelationModels import TopologyDeviceModel, TopologyLinkModel from .models.TopologyModel import TopologyModel from .uuids.Context import context_get_uuid from .uuids.Topology import topology_get_uuid +LOGGER = logging.getLogger(__name__) + def topology_list_ids(db_engine : Engine, request : ContextId) -> TopologyIdList: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: @@ -63,21 +65,15 @@ def topology_set(db_engine : Engine, request : Topology) -> bool: if len(topology_name) == 0: topology_name = request.topology_id.topology_uuid.uuid context_uuid,topology_uuid = topology_get_uuid(request.topology_id, topology_name=topology_name, allow_random=True) - #device_uuids : Set[str] = set() - #devices_to_add : List[Dict] = list() - #for device_id in request.device_ids: - # device_uuid = device_id.device_uuid.uuid - # if device_uuid in device_uuids: continue - # devices_to_add.append({'topology_uuid': topology_uuid, 'device_uuid': device_uuid}) - # device_uuids.add(device_uuid) + # Ignore request.device_ids and request.link_ids. They are used for retrieving devices and links added into the + # topology. Explicit addition into the topology is done automatically when creating the devices and links, based + # on the topologies specified in the endpoints associated with the devices and links. + + if len(request.device_ids) > 0: # pragma: no cover + LOGGER.warning('Items in field "device_ids" ignored. This field is used for retrieval purposes only.') - #link_uuids : Set[str] = set() - #links_to_add : List[Dict] = list() - #for link_id in request.link_ids: - # link_uuid = link_id.link_uuid.uuid - # if link_uuid in link_uuids: continue - # links_to_add.append({'topology_uuid': topology_uuid, 'link_uuid': link_uuid}) - # link_uuids.add(link_uuid) + if len(request.link_ids) > 0: # pragma: no cover + LOGGER.warning('Items in field "link_ids" ignored. This field is used for retrieval purposes only.') topology_data = [{ 'context_uuid' : context_uuid, @@ -93,16 +89,6 @@ def topology_set(db_engine : Engine, request : Topology) -> bool: ) session.execute(stmt) - #if len(devices_to_add) > 0: - # session.execute(insert(TopologyDeviceModel).values(devices_to_add).on_conflict_do_nothing( - # index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] - # )) - - #if len(links_to_add) > 0: - # session.execute(insert(TopologyLinkModel).values(links_to_add).on_conflict_do_nothing( - # index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid] - # )) - run_transaction(sessionmaker(bind=db_engine), callback) updated = False # TODO: improve and check if created/updated return TopologyId(**json_topology_id(topology_uuid, json_context_id(context_uuid))),updated @@ -110,7 +96,6 @@ def topology_set(db_engine : Engine, request : Topology) -> bool: def topology_delete(db_engine : Engine, request : TopologyId) -> bool: _,topology_uuid = topology_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: - num_deleted = session.query(TopologyModel)\ - .filter_by(topology_uuid=topology_uuid).delete() + num_deleted = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid).delete() return num_deleted > 0 return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index 9d56344e8..11e151ef6 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -28,15 +28,17 @@ class ConfigRuleModel(_Base): __tablename__ = 'configrule' configrule_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE')) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE'), nullable=True) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True) position = Column(Integer, nullable=False) - kind = Column(Enum(ConfigRuleKindEnum)) - action = Column(Enum(ORM_ConfigActionEnum)) + kind = Column(Enum(ConfigRuleKindEnum), nullable=False) + action = Column(Enum(ORM_ConfigActionEnum), nullable=False) data = Column(String, nullable=False) __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), #UniqueConstraint('device_uuid', 'position', name='unique_per_device'), + #UniqueConstraint('service_uuid', 'position', name='unique_per_service'), ) def dump(self) -> Dict: diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py index d616c3a7f..118ae9505 100644 --- a/src/context/service/database/models/ConstraintModel.py +++ b/src/context/service/database/models/ConstraintModel.py @@ -12,144 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, operator -from typing import Dict, List, Optional, Tuple, Type, Union -from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import Constraint -from common.tools.grpc.Tools import grpc_message_to_json_string -from .EndPointModel import EndPointModel -from .Tools import fast_hasher -from sqlalchemy import Column, ForeignKey, String, Float, CheckConstraint, Integer, Boolean, Enum +import enum, json +from sqlalchemy import CheckConstraint, Column, Enum, ForeignKey, Integer, String from sqlalchemy.dialects.postgresql import UUID -from context.service.database.models._Base import Base -import enum +from typing import Dict +from ._Base import _Base -LOGGER = logging.getLogger(__name__) - -def remove_dict_key(dictionary : Dict, key : str): - dictionary.pop(key, None) - return dictionary - -class ConstraintsModel(Base): # pylint: disable=abstract-method - __tablename__ = 'Constraints' - constraints_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - - @staticmethod - def main_pk_name(): - return 'constraints_uuid' - - - def dump(self, constraints) -> List[Dict]: - constraints = sorted(constraints, key=operator.itemgetter('position')) - return [remove_dict_key(constraint, 'position') for constraint in constraints] - - -class ConstraintCustomModel(Base): # pylint: disable=abstract-method - __tablename__ = 'ConstraintCustom' - constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - constraint_type = Column(String, nullable=False) - constraint_value = Column(String, nullable=False) - - @staticmethod - def main_pk_name(): - return 'constraint_uuid' - - - def dump(self) -> Dict: # pylint: disable=arguments-differ - return {'custom': {'constraint_type': self.constraint_type, 'constraint_value': self.constraint_value}} - - -Union_ConstraintEndpoint = Union[ - 'ConstraintEndpointLocationGpsPositionModel', 'ConstraintEndpointLocationRegionModel', - 'ConstraintEndpointPriorityModel' -] - -class ConstraintEndpointLocationRegionModel(Model): # pylint: disable=abstract-method - endpoint_fk = ForeignKeyField(EndPointModel) - region = StringField(required=True, allow_empty=False) - - def dump(self) -> Dict: # pylint: disable=arguments-differ - json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id() - return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'region': self.region}}} - -# def dump_endpoint_id(endpoint_constraint: Union_ConstraintEndpoint): -# db_endpoints_pks = list(endpoint_constraint.references(EndPointModel)) -# num_endpoints = len(db_endpoints_pks) -# if num_endpoints != 1: -# raise Exception('Wrong number({:d}) of associated Endpoints with constraint'.format(num_endpoints)) -# db_endpoint = EndPointModel(endpoint_constraint.database, db_endpoints_pks[0]) -# return db_endpoint.dump_id() - - -class ConstraintEndpointLocationRegionModel(Base): # pylint: disable=abstract-method - __tablename__ = 'ConstraintEndpointLocationRegion' - constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) - region = Column(String, nullable=False) - - @staticmethod - def main_pk_name(): - return 'constraint_uuid' - - def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ - return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'region': self.region}} - - def dump(self) -> Dict: # pylint: disable=arguments-differ - gps_position = {'latitude': self.latitude, 'longitude': self.longitude} - json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id() - return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'gps_position': gps_position}}} - -class ConstraintEndpointPriorityModel(Model): # pylint: disable=abstract-method - endpoint_fk = ForeignKeyField(EndPointModel) - priority = IntegerField(required=True, min_value=0) - - def dump(self) -> Dict: # pylint: disable=arguments-differ - json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id() - return {'endpoint_priority': {'endpoint_id': json_endpoint_id, 'priority': self.priority}} - -class ConstraintEndpointLocationGpsPositionModel(Base): # pylint: disable=abstract-method - __tablename__ = 'ConstraintEndpointLocationGpsPosition' - constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) - latitude = Column(Float, CheckConstraint('latitude > -90.0 AND latitude < 90.0'), nullable=False) - longitude = Column(Float, CheckConstraint('longitude > -90.0 AND longitude < 90.0'), nullable=False) - - def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ - gps_position = {'latitude': self.latitude, 'longitude': self.longitude} - return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'gps_position': gps_position}} - - -class ConstraintEndpointPriorityModel(Base): # pylint: disable=abstract-method - __tablename__ = 'ConstraintEndpointPriority' - constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) - # endpoint_fk = ForeignKeyField(EndPointModel) - # priority = FloatField(required=True) - priority = Column(Float, nullable=False) - @staticmethod - def main_pk_name(): - return 'constraint_uuid' - - def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ - return {'endpoint_priority': {'endpoint_id': endpoint.dump_id(), 'priority': self.priority}} - - -class ConstraintSlaAvailabilityModel(Base): # pylint: disable=abstract-method - __tablename__ = 'ConstraintSlaAvailability' - constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - # num_disjoint_paths = IntegerField(required=True, min_value=1) - num_disjoint_paths = Column(Integer, CheckConstraint('num_disjoint_paths > 1'), nullable=False) - # all_active = BooleanField(required=True) - all_active = Column(Boolean, nullable=False) - @staticmethod - def main_pk_name(): - return 'constraint_uuid' - - def dump(self) -> Dict: # pylint: disable=arguments-differ - return {'sla_availability': {'num_disjoint_paths': self.num_disjoint_paths, 'all_active': self.all_active}} - -# enum values should match name of field in ConstraintModel +# Enum values should match name of field in ConstraintModel class ConstraintKindEnum(enum.Enum): CUSTOM = 'custom' ENDPOINT_LOCATION_REGION = 'ep_loc_region' @@ -157,215 +26,370 @@ class ConstraintKindEnum(enum.Enum): ENDPOINT_PRIORITY = 'ep_priority' SLA_AVAILABILITY = 'sla_avail' -Union_SpecificConstraint = Union[ - ConstraintCustomModel, ConstraintEndpointLocationRegionModel, ConstraintEndpointLocationGpsPositionModel, - ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel, -] - -class ConstraintModel(Base): # pylint: disable=abstract-method - __tablename__ = 'Constraint' - # pk = PrimaryKeyField() - # constraints_fk = ForeignKeyField(ConstraintsModel) - constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - constraints_uuid = Column(UUID(as_uuid=False), ForeignKey("Constraints.constraints_uuid"), primary_key=True) - # kind = EnumeratedField(ConstraintKindEnum) - kind = Column(Enum(ConstraintKindEnum, create_constraint=False, native_enum=False)) - # position = IntegerField(min_value=0, required=True) - position = Column(Integer, CheckConstraint('position >= 0'), nullable=False) - # constraint_custom_fk = ForeignKeyField(ConstraintCustomModel, required=False) - constraint_custom = Column(UUID(as_uuid=False), ForeignKey("ConstraintCustom.constraint_uuid")) - # constraint_ep_loc_region_fk = ForeignKeyField(ConstraintEndpointLocationRegionModel, required=False) - constraint_ep_loc_region = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationRegion.constraint_uuid")) - # constraint_ep_loc_gpspos_fk = ForeignKeyField(ConstraintEndpointLocationGpsPositionModel, required=False) - constraint_ep_loc_gpspos = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationGpsPosition.constraint_uuid")) - # constraint_ep_priority_fk = ForeignKeyField(ConstraintEndpointPriorityModel, required=False) - constraint_ep_priority = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointPriority.constraint_uuid"),) - # constraint_sla_avail_fk = ForeignKeyField(ConstraintSlaAvailabilityModel, required=False) - constraint_sla_avail = Column(UUID(as_uuid=False), ForeignKey("ConstraintSlaAvailability.constraint_uuid")) - - @staticmethod - def main_pk_name(): - return 'constraint_uuid' - - # def delete(self) -> None: - # field_name = 'constraint_{:s}_fk'.format(str(self.kind.value)) - # specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None) - # if specific_fk_value is None: - # raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name)) - # specific_fk_class = getattr(ConstraintModel, field_name, None) - # foreign_model_class : Model = specific_fk_class.foreign_model - # super().delete() - # get_object(self.database, foreign_model_class, str(specific_fk_value)).delete() - - def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ - field_name = 'constraint_{:s}'.format(str(self.kind.value)) - specific_fk_value = getattr(self, field_name, None) - if specific_fk_value is None: - raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name)) - specific_fk_class = getattr(ConstraintModel, field_name, None) - foreign_model_class: Base = specific_fk_class.foreign_model - constraint: Union_SpecificConstraint = get_object(self.database, foreign_model_class, str(specific_fk_value)) - result = constraint.dump() - if include_position: - result['position'] = self.position - return result - -Tuple_ConstraintSpecs = Tuple[Type, str, Dict, ConstraintKindEnum] - -def parse_constraint_custom(grpc_constraint) -> Tuple_ConstraintSpecs: - constraint_class = ConstraintCustomModel - str_constraint_id = grpc_constraint.custom.constraint_type - constraint_data = { - 'constraint_type' : grpc_constraint.custom.constraint_type, - 'constraint_value': grpc_constraint.custom.constraint_value, - } - return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.CUSTOM - -def parse_constraint_endpoint_location(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs: - grpc_endpoint_id = grpc_constraint.endpoint_location.endpoint_id - # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) - - str_constraint_id = db_endpoint.endpoint_uuid - constraint_data = {'endpoint_fk': db_endpoint} - - grpc_location = grpc_constraint.endpoint_location.location - location_kind = str(grpc_location.WhichOneof('location')) - if location_kind == 'region': - constraint_class = ConstraintEndpointLocationRegionModel - constraint_data.update({'region': grpc_location.region}) - return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_REGION - elif location_kind == 'gps_position': - constraint_class = ConstraintEndpointLocationGpsPositionModel - gps_position = grpc_location.gps_position - constraint_data.update({'latitude': gps_position.latitude, 'longitude': gps_position.longitude}) - return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_GPSPOSITION - else: - MSG = 'Location kind {:s} in Constraint of kind endpoint_location is not implemented: {:s}' - raise NotImplementedError(MSG.format(location_kind, grpc_message_to_json_string(grpc_constraint))) - -def parse_constraint_endpoint_priority(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs: - grpc_endpoint_id = grpc_constraint.endpoint_priority.endpoint_id - # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) - - constraint_class = ConstraintEndpointPriorityModel - str_constraint_id = db_endpoint.endpoint_uuid - priority = grpc_constraint.endpoint_priority.priority - constraint_data = {'endpoint_fk': db_endpoint, 'priority': priority} - - return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_PRIORITY - -def parse_constraint_sla_availability(grpc_constraint) -> Tuple_ConstraintSpecs: - constraint_class = ConstraintSlaAvailabilityModel - str_constraint_id = '' - constraint_data = { - 'num_disjoint_paths' : grpc_constraint.sla_availability.num_disjoint_paths, - 'all_active': grpc_constraint.sla_availability.all_active, - } - return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.SLA_AVAILABILITY - -CONSTRAINT_PARSERS = { - 'custom' : parse_constraint_custom, - 'endpoint_location' : parse_constraint_endpoint_location, - 'endpoint_priority' : parse_constraint_endpoint_priority, - 'sla_availability' : parse_constraint_sla_availability, -} - -Union_ConstraintModel = Union[ - ConstraintCustomModel, ConstraintEndpointLocationGpsPositionModel, ConstraintEndpointLocationRegionModel, - ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel -] - -# def set_constraint( -# db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int -# ) -> Tuple[Union_ConstraintModel, bool]: -# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) -# -# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) -# if parser is None: -# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( -# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) -# -# # create specific constraint -# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint) -# str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) -# str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') -# result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( -# database, constraint_class, str_constraint_key, constraint_data) -# db_specific_constraint, updated = result -# -# # create generic constraint -# constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value) -# constraint_data = { -# 'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind, -# constraint_fk_field_name: db_specific_constraint -# } -# result : Tuple[ConstraintModel, bool] = update_or_create_object( -# database, ConstraintModel, str_constraint_key, constraint_data) -# db_constraint, updated = result -# -# return db_constraint, updated -# -# def set_constraints( -# database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints -# ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: -# -# str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') -# result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) -# db_constraints, created = result -# -# db_objects = [(db_constraints, created)] -# -# for position,grpc_constraint in enumerate(grpc_constraints): -# result : Tuple[ConstraintModel, bool] = set_constraint( -# database, db_constraints, grpc_constraint, position) -# db_constraint, updated = result -# db_objects.append((db_constraint, updated)) -# -# return db_objects -def set_constraint( - database : Database, db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int -) -> Tuple[Union_ConstraintModel, bool]: - grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) - - parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) - if parser is None: - raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( - grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) - - # create specific constraint - constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint) - str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) - str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') - result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( - database, constraint_class, str_constraint_key, constraint_data) - db_specific_constraint, updated = result - - # create generic constraint - constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value) - constraint_data = { - 'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind, - constraint_fk_field_name: db_specific_constraint - } - result : Tuple[ConstraintModel, bool] = update_or_create_object( - database, ConstraintModel, str_constraint_key, constraint_data) - db_constraint, updated = result - - return db_constraint, updated - -def set_constraints( - database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints -) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: - - str_constraints_key = key_to_str([constraints_name, db_parent_pk], separator=':') - result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) - db_constraints, created = result - - db_objects = [(db_constraints, created)] - - for position,grpc_constraint in enumerate(grpc_constraints): - result : Tuple[ConstraintModel, bool] = set_constraint( - database, db_constraints, grpc_constraint, position) - db_constraint, updated = result - db_objects.append((db_constraint, updated)) - - return db_objects +class ConstraintModel(_Base): + __tablename__ = 'constraint' + + constraint_uuid = Column(UUID(as_uuid=False), primary_key=True) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=False) + position = Column(Integer, nullable=False) + kind = Column(Enum(ConstraintKindEnum), nullable=False) + data = Column(String, nullable=False) + + __table_args__ = ( + CheckConstraint(position >= 0, name='check_position_value'), + #UniqueConstraint('service_uuid', 'position', name='unique_per_service'), + ) + + def dump(self) -> Dict: + return {self.kind.value: json.loads(self.data)} + + +#import logging, operator +#from typing import Dict, List, Optional, Tuple, Type, Union +#from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object +#from common.orm.backend.Tools import key_to_str +#from common.proto.context_pb2 import Constraint +#from common.tools.grpc.Tools import grpc_message_to_json_string +#from .EndPointModel import EndPointModel +#from .Tools import fast_hasher +#from sqlalchemy import Column, ForeignKey, String, Float, CheckConstraint, Integer, Boolean, Enum +#from sqlalchemy.dialects.postgresql import UUID +#from context.service.database.models._Base import Base +#import enum +# +#LOGGER = logging.getLogger(__name__) +# +#def remove_dict_key(dictionary : Dict, key : str): +# dictionary.pop(key, None) +# return dictionary +# +#class ConstraintsModel(Base): # pylint: disable=abstract-method +# __tablename__ = 'Constraints' +# constraints_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# +# @staticmethod +# def main_pk_name(): +# return 'constraints_uuid' +# +# +# def dump(self, constraints) -> List[Dict]: +# constraints = sorted(constraints, key=operator.itemgetter('position')) +# return [remove_dict_key(constraint, 'position') for constraint in constraints] +# +# +#class ConstraintCustomModel(Base): # pylint: disable=abstract-method +# __tablename__ = 'ConstraintCustom' +# constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# constraint_type = Column(String, nullable=False) +# constraint_value = Column(String, nullable=False) +# +# @staticmethod +# def main_pk_name(): +# return 'constraint_uuid' +# +# +# def dump(self) -> Dict: # pylint: disable=arguments-differ +# return {'custom': {'constraint_type': self.constraint_type, 'constraint_value': self.constraint_value}} +# +# +#Union_ConstraintEndpoint = Union[ +# 'ConstraintEndpointLocationGpsPositionModel', 'ConstraintEndpointLocationRegionModel', +# 'ConstraintEndpointPriorityModel' +#] +# +#class ConstraintEndpointLocationRegionModel(Model): # pylint: disable=abstract-method +# endpoint_fk = ForeignKeyField(EndPointModel) +# region = StringField(required=True, allow_empty=False) +# +# def dump(self) -> Dict: # pylint: disable=arguments-differ +# json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id() +# return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'region': self.region}}} +# +## def dump_endpoint_id(endpoint_constraint: Union_ConstraintEndpoint): +## db_endpoints_pks = list(endpoint_constraint.references(EndPointModel)) +## num_endpoints = len(db_endpoints_pks) +## if num_endpoints != 1: +## raise Exception('Wrong number({:d}) of associated Endpoints with constraint'.format(num_endpoints)) +## db_endpoint = EndPointModel(endpoint_constraint.database, db_endpoints_pks[0]) +## return db_endpoint.dump_id() +# +# +#class ConstraintEndpointLocationRegionModel(Base): # pylint: disable=abstract-method +# __tablename__ = 'ConstraintEndpointLocationRegion' +# constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) +# region = Column(String, nullable=False) +# +# @staticmethod +# def main_pk_name(): +# return 'constraint_uuid' +# +# def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ +# return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'region': self.region}} +# +# def dump(self) -> Dict: # pylint: disable=arguments-differ +# gps_position = {'latitude': self.latitude, 'longitude': self.longitude} +# json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id() +# return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'gps_position': gps_position}}} +# +#class ConstraintEndpointPriorityModel(Model): # pylint: disable=abstract-method +# endpoint_fk = ForeignKeyField(EndPointModel) +# priority = IntegerField(required=True, min_value=0) +# +# def dump(self) -> Dict: # pylint: disable=arguments-differ +# json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id() +# return {'endpoint_priority': {'endpoint_id': json_endpoint_id, 'priority': self.priority}} +# +#class ConstraintEndpointLocationGpsPositionModel(Base): # pylint: disable=abstract-method +# __tablename__ = 'ConstraintEndpointLocationGpsPosition' +# constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) +# latitude = Column(Float, CheckConstraint('latitude > -90.0 AND latitude < 90.0'), nullable=False) +# longitude = Column(Float, CheckConstraint('longitude > -90.0 AND longitude < 90.0'), nullable=False) +# +# def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ +# gps_position = {'latitude': self.latitude, 'longitude': self.longitude} +# return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'gps_position': gps_position}} +# +# +#class ConstraintEndpointPriorityModel(Base): # pylint: disable=abstract-method +# __tablename__ = 'ConstraintEndpointPriority' +# constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) +# # endpoint_fk = ForeignKeyField(EndPointModel) +# # priority = FloatField(required=True) +# priority = Column(Float, nullable=False) +# @staticmethod +# def main_pk_name(): +# return 'constraint_uuid' +# +# def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ +# return {'endpoint_priority': {'endpoint_id': endpoint.dump_id(), 'priority': self.priority}} +# +# +#class ConstraintSlaAvailabilityModel(Base): # pylint: disable=abstract-method +# __tablename__ = 'ConstraintSlaAvailability' +# constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# # num_disjoint_paths = IntegerField(required=True, min_value=1) +# num_disjoint_paths = Column(Integer, CheckConstraint('num_disjoint_paths > 1'), nullable=False) +# # all_active = BooleanField(required=True) +# all_active = Column(Boolean, nullable=False) +# @staticmethod +# def main_pk_name(): +# return 'constraint_uuid' +# +# def dump(self) -> Dict: # pylint: disable=arguments-differ +# return {'sla_availability': {'num_disjoint_paths': self.num_disjoint_paths, 'all_active': self.all_active}} +# +#Union_SpecificConstraint = Union[ +# ConstraintCustomModel, ConstraintEndpointLocationRegionModel, ConstraintEndpointLocationGpsPositionModel, +# ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel, +#] +# +#class ConstraintModel(Base): # pylint: disable=abstract-method +# __tablename__ = 'Constraint' +# # pk = PrimaryKeyField() +# # constraints_fk = ForeignKeyField(ConstraintsModel) +# constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) +# constraints_uuid = Column(UUID(as_uuid=False), ForeignKey("Constraints.constraints_uuid"), primary_key=True) +# # kind = EnumeratedField(ConstraintKindEnum) +# kind = Column(Enum(ConstraintKindEnum, create_constraint=False, native_enum=False)) +# # position = IntegerField(min_value=0, required=True) +# position = Column(Integer, CheckConstraint('position >= 0'), nullable=False) +# # constraint_custom_fk = ForeignKeyField(ConstraintCustomModel, required=False) +# constraint_custom = Column(UUID(as_uuid=False), ForeignKey("ConstraintCustom.constraint_uuid")) +# # constraint_ep_loc_region_fk = ForeignKeyField(ConstraintEndpointLocationRegionModel, required=False) +# constraint_ep_loc_region = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationRegion.constraint_uuid")) +# # constraint_ep_loc_gpspos_fk = ForeignKeyField(ConstraintEndpointLocationGpsPositionModel, required=False) +# constraint_ep_loc_gpspos = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationGpsPosition.constraint_uuid")) +# # constraint_ep_priority_fk = ForeignKeyField(ConstraintEndpointPriorityModel, required=False) +# constraint_ep_priority = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointPriority.constraint_uuid"),) +# # constraint_sla_avail_fk = ForeignKeyField(ConstraintSlaAvailabilityModel, required=False) +# constraint_sla_avail = Column(UUID(as_uuid=False), ForeignKey("ConstraintSlaAvailability.constraint_uuid")) +# +# @staticmethod +# def main_pk_name(): +# return 'constraint_uuid' +# +# # def delete(self) -> None: +# # field_name = 'constraint_{:s}_fk'.format(str(self.kind.value)) +# # specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None) +# # if specific_fk_value is None: +# # raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name)) +# # specific_fk_class = getattr(ConstraintModel, field_name, None) +# # foreign_model_class : Model = specific_fk_class.foreign_model +# # super().delete() +# # get_object(self.database, foreign_model_class, str(specific_fk_value)).delete() +# +# def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ +# field_name = 'constraint_{:s}'.format(str(self.kind.value)) +# specific_fk_value = getattr(self, field_name, None) +# if specific_fk_value is None: +# raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name)) +# specific_fk_class = getattr(ConstraintModel, field_name, None) +# foreign_model_class: Base = specific_fk_class.foreign_model +# constraint: Union_SpecificConstraint = get_object(self.database, foreign_model_class, str(specific_fk_value)) +# result = constraint.dump() +# if include_position: +# result['position'] = self.position +# return result +# +#Tuple_ConstraintSpecs = Tuple[Type, str, Dict, ConstraintKindEnum] +# +#def parse_constraint_custom(grpc_constraint) -> Tuple_ConstraintSpecs: +# constraint_class = ConstraintCustomModel +# str_constraint_id = grpc_constraint.custom.constraint_type +# constraint_data = { +# 'constraint_type' : grpc_constraint.custom.constraint_type, +# 'constraint_value': grpc_constraint.custom.constraint_value, +# } +# return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.CUSTOM +# +#def parse_constraint_endpoint_location(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs: +# grpc_endpoint_id = grpc_constraint.endpoint_location.endpoint_id +# # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) +# +# str_constraint_id = db_endpoint.endpoint_uuid +# constraint_data = {'endpoint_fk': db_endpoint} +# +# grpc_location = grpc_constraint.endpoint_location.location +# location_kind = str(grpc_location.WhichOneof('location')) +# if location_kind == 'region': +# constraint_class = ConstraintEndpointLocationRegionModel +# constraint_data.update({'region': grpc_location.region}) +# return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_REGION +# elif location_kind == 'gps_position': +# constraint_class = ConstraintEndpointLocationGpsPositionModel +# gps_position = grpc_location.gps_position +# constraint_data.update({'latitude': gps_position.latitude, 'longitude': gps_position.longitude}) +# return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_GPSPOSITION +# else: +# MSG = 'Location kind {:s} in Constraint of kind endpoint_location is not implemented: {:s}' +# raise NotImplementedError(MSG.format(location_kind, grpc_message_to_json_string(grpc_constraint))) +# +#def parse_constraint_endpoint_priority(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs: +# grpc_endpoint_id = grpc_constraint.endpoint_priority.endpoint_id +# # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id) +# +# constraint_class = ConstraintEndpointPriorityModel +# str_constraint_id = db_endpoint.endpoint_uuid +# priority = grpc_constraint.endpoint_priority.priority +# constraint_data = {'endpoint_fk': db_endpoint, 'priority': priority} +# +# return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_PRIORITY +# +#def parse_constraint_sla_availability(grpc_constraint) -> Tuple_ConstraintSpecs: +# constraint_class = ConstraintSlaAvailabilityModel +# str_constraint_id = '' +# constraint_data = { +# 'num_disjoint_paths' : grpc_constraint.sla_availability.num_disjoint_paths, +# 'all_active': grpc_constraint.sla_availability.all_active, +# } +# return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.SLA_AVAILABILITY +# +#CONSTRAINT_PARSERS = { +# 'custom' : parse_constraint_custom, +# 'endpoint_location' : parse_constraint_endpoint_location, +# 'endpoint_priority' : parse_constraint_endpoint_priority, +# 'sla_availability' : parse_constraint_sla_availability, +#} +# +#Union_ConstraintModel = Union[ +# ConstraintCustomModel, ConstraintEndpointLocationGpsPositionModel, ConstraintEndpointLocationRegionModel, +# ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel +#] +# +## def set_constraint( +## db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int +## ) -> Tuple[Union_ConstraintModel, bool]: +## grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) +## +## parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) +## if parser is None: +## raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( +## grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) +## +## # create specific constraint +## constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint) +## str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) +## str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') +## result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( +## database, constraint_class, str_constraint_key, constraint_data) +## db_specific_constraint, updated = result +## +## # create generic constraint +## constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value) +## constraint_data = { +## 'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind, +## constraint_fk_field_name: db_specific_constraint +## } +## result : Tuple[ConstraintModel, bool] = update_or_create_object( +## database, ConstraintModel, str_constraint_key, constraint_data) +## db_constraint, updated = result +## +## return db_constraint, updated +## +## def set_constraints( +## database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints +## ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: +## +## str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':') +## result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) +## db_constraints, created = result +## +## db_objects = [(db_constraints, created)] +## +## for position,grpc_constraint in enumerate(grpc_constraints): +## result : Tuple[ConstraintModel, bool] = set_constraint( +## database, db_constraints, grpc_constraint, position) +## db_constraint, updated = result +## db_objects.append((db_constraint, updated)) +## +## return db_objects +#def set_constraint( +# database : Database, db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int +#) -> Tuple[Union_ConstraintModel, bool]: +# grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint')) +# +# parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind) +# if parser is None: +# raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format( +# grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint))) +# +# # create specific constraint +# constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint) +# str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id])) +# str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':') +# result : Tuple[Union_ConstraintModel, bool] = update_or_create_object( +# database, constraint_class, str_constraint_key, constraint_data) +# db_specific_constraint, updated = result +# +# # create generic constraint +# constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value) +# constraint_data = { +# 'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind, +# constraint_fk_field_name: db_specific_constraint +# } +# result : Tuple[ConstraintModel, bool] = update_or_create_object( +# database, ConstraintModel, str_constraint_key, constraint_data) +# db_constraint, updated = result +# +# return db_constraint, updated +# +#def set_constraints( +# database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints +#) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]: +# +# str_constraints_key = key_to_str([constraints_name, db_parent_pk], separator=':') +# result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key) +# db_constraints, created = result +# +# db_objects = [(db_constraints, created)] +# +# for position,grpc_constraint in enumerate(grpc_constraints): +# result : Tuple[ConstraintModel, bool] = set_constraint( +# database, db_constraints, grpc_constraint, position) +# db_constraint, updated = result +# db_objects.append((db_constraint, updated)) +# +# return db_objects diff --git a/src/context/service/database/models/ContextModel.py b/src/context/service/database/models/ContextModel.py index 84039dea9..1a282e8bd 100644 --- a/src/context/service/database/models/ContextModel.py +++ b/src/context/service/database/models/ContextModel.py @@ -25,7 +25,7 @@ class ContextModel(_Base): context_name = Column(String, nullable=False) topologies = relationship('TopologyModel', back_populates='context') - #services = relationship('ServiceModel', back_populates='context') + services = relationship('ServiceModel', back_populates='context') #slices = relationship('SliceModel', back_populates='context') def dump_id(self) -> Dict: @@ -36,6 +36,6 @@ class ContextModel(_Base): 'context_id' : self.dump_id(), 'name' : self.context_name, 'topology_ids': [obj.dump_id() for obj in self.topologies], - #'service_ids' : [obj.dump_id() for obj in self.services ], - #'slice_ids' : [obj.dump_id() for obj in self.slices ], + 'service_ids' : [obj.dump_id() for obj in self.services ], + #'slice_ids' : [obj.dump_id() for obj in self.slices ], } diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py index 50db8e7bb..74fa70cf8 100644 --- a/src/context/service/database/models/DeviceModel.py +++ b/src/context/service/database/models/DeviceModel.py @@ -14,8 +14,8 @@ import operator from typing import Dict -from sqlalchemy import Column, String, Enum -from sqlalchemy.dialects.postgresql import UUID, ARRAY +from sqlalchemy import Column, Enum, String +from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship from .enums.DeviceDriver import ORM_DeviceDriverEnum from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum @@ -27,7 +27,7 @@ class DeviceModel(_Base): device_uuid = Column(UUID(as_uuid=False), primary_key=True) device_name = Column(String, nullable=False) device_type = Column(String, nullable=False) - device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum)) + device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum), nullable=False) device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) #topology_devices = relationship('TopologyDeviceModel', back_populates='device') diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py index f9d5f7658..b69b4978b 100644 --- a/src/context/service/database/models/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -23,10 +23,10 @@ class EndPointModel(_Base): __tablename__ = 'endpoint' endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_uuid = Column(UUID(as_uuid=False), ForeignKey('device.device_uuid', ondelete='CASCADE' )) - topology_uuid = Column(UUID(as_uuid=False), ForeignKey('topology.topology_uuid', ondelete='RESTRICT')) - name = Column(String) - endpoint_type = Column(String) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), nullable=False) + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False) + name = Column(String, nullable=False) + endpoint_type = Column(String, nullable=False) kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1)) device = relationship('DeviceModel', back_populates='endpoints') diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index 053dc0122..fd4f80c16 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -25,7 +25,7 @@ class LinkModel(_Base): link_name = Column(String, nullable=False) #topology_links = relationship('TopologyLinkModel', back_populates='link') - link_endpoints = relationship('LinkEndPointModel', back_populates='link') #, lazy='joined') + link_endpoints = relationship('LinkEndPointModel') # lazy='joined', back_populates='link' def dump_id(self) -> Dict: return {'link_uuid': {'uuid': self.link_uuid}} diff --git a/src/context/service/database/models/RelationModels.py b/src/context/service/database/models/RelationModels.py index 89e8e05e0..a57d85eb3 100644 --- a/src/context/service/database/models/RelationModels.py +++ b/src/context/service/database/models/RelationModels.py @@ -31,33 +31,14 @@ class LinkEndPointModel(_Base): link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints' -#class ServiceEndPointModel(_Base): -# __tablename__ = 'service_endpoint' -# -# context_uuid = Column(UUID(as_uuid=False), primary_key=True) -# service_uuid = Column(UUID(as_uuid=False), primary_key=True) -# topology_uuid = Column(UUID(as_uuid=False), primary_key=True) -# device_uuid = Column(UUID(as_uuid=False), primary_key=True) -# endpoint_uuid = Column(UUID(as_uuid=False), primary_key=True) -# -# service = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined') -# endpoint = relationship('EndPointModel', back_populates='service_endpoints', lazy='joined') -# writer = relationship( -# "Writer", -# primaryjoin="and_(Writer.id == foreign(Article.writer_id), " -# "Writer.magazine_id == Article.magazine_id)", -# ) -# -# __table_args__ = ( -# ForeignKeyConstraint( -# ['context_uuid', 'service_uuid'], -# ['service.context_uuid', 'service.service_uuid'], -# ondelete='CASCADE'), -# ForeignKeyConstraint( -# ['context_uuid', 'topology_uuid', 'device_uuid', 'endpoint_uuid'], -# ['endpoint.context_uuid', 'endpoint.topology_uuid', 'endpoint.device_uuid', 'endpoint.endpoint_uuid'], -# ondelete='CASCADE'), -# ) +class ServiceEndPointModel(_Base): + __tablename__ = 'service_endpoint' + + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE' ), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + + service = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints' # class SliceEndPointModel(Model): # pk = PrimaryKeyField() diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py index ea4e89526..b08043844 100644 --- a/src/context/service/database/models/ServiceModel.py +++ b/src/context/service/database/models/ServiceModel.py @@ -13,8 +13,8 @@ # limitations under the License. import operator -from sqlalchemy import Column, Enum, Float, ForeignKey, String from typing import Dict +from sqlalchemy import Column, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from .enums.ServiceStatus import ORM_ServiceStatusEnum @@ -24,17 +24,16 @@ from ._Base import _Base class ServiceModel(_Base): __tablename__ = 'service' - context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid'), primary_key=True) service_uuid = Column(UUID(as_uuid=False), primary_key=True) + context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) service_name = Column(String, nullable=False) - service_type = Column(Enum(ORM_ServiceTypeEnum)) - service_status = Column(Enum(ORM_ServiceStatusEnum)) - created_at = Column(Float) + service_type = Column(Enum(ORM_ServiceTypeEnum), nullable=False) + service_status = Column(Enum(ORM_ServiceStatusEnum), nullable=False) context = relationship('ContextModel', back_populates='services') - service_endpoints = relationship('ServiceEndPointModel', back_populates='service') #, lazy='joined') - #constraints = relationship('ConstraintModel', passive_deletes=True, back_populates='service', lazy='joined') - config_rules = relationship('ConfigRuleModel', passive_deletes=True, back_populates='service', lazy='joined') + service_endpoints = relationship('ServiceEndPointModel') # lazy='joined', back_populates='service' + constraints = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='service' + config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='service' def dump_id(self) -> Dict: return { @@ -53,8 +52,8 @@ class ServiceModel(_Base): for service_endpoint in self.service_endpoints ], 'service_constraints' : [ - #constraint.dump() - #for constraint in sorted(self.constraints, key=operator.attrgetter('position')) + constraint.dump() + for constraint in sorted(self.constraints, key=operator.attrgetter('position')) ], 'service_config' : {'config_rules': [ config_rule.dump() diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index e0119bead..8c59bf58a 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -22,7 +22,7 @@ class TopologyModel(_Base): __tablename__ = 'topology' topology_uuid = Column(UUID(as_uuid=False), primary_key=True) - context_uuid = Column(UUID(as_uuid=False), ForeignKey('context.context_uuid')) + context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) topology_name = Column(String, nullable=False) context = relationship('ContextModel', back_populates='topologies') diff --git a/src/context/service/database/uuids/Service.py b/src/context/service/database/uuids/Service.py new file mode 100644 index 000000000..56a5d12a0 --- /dev/null +++ b/src/context/service/database/uuids/Service.py @@ -0,0 +1,37 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple +from common.proto.context_pb2 import ServiceId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random +from .Context import context_get_uuid + +def service_get_uuid( + service_id : ServiceId, service_name : str = '', allow_random : bool = False +) -> Tuple[str, str]: + context_uuid = context_get_uuid(service_id.context_id, allow_random=False) + raw_service_uuid = service_id.service_uuid.uuid + + if len(raw_service_uuid) > 0: + return context_uuid, get_uuid_from_string(raw_service_uuid, prefix_for_name=context_uuid) + if len(service_name) > 0: + return context_uuid, get_uuid_from_string(service_name, prefix_for_name=context_uuid) + if allow_random: + return context_uuid, get_uuid_random() + + raise InvalidArgumentsException([ + ('service_id.service_uuid.uuid', raw_service_uuid), + ('name', service_name), + ], extra_details=['At least one is required to produce a Service UUID']) diff --git a/src/context/tests/_test_service.py b/src/context/tests/test_service.py similarity index 58% rename from src/context/tests/_test_service.py rename to src/context/tests/test_service.py index 8bd6570de..ca81bbfa3 100644 --- a/src/context/tests/_test_service.py +++ b/src/context/tests/test_service.py @@ -13,108 +13,105 @@ # limitations under the License. import copy, grpc, pytest -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.proto.context_pb2 import ( Context, ContextId, Device, DeviceId, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyId) from context.client.ContextClient import ContextClient +from context.service.database.uuids.Service import service_get_uuid #from context.client.EventsCollector import EventsCollector from .Objects import ( - CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, - SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, TOPOLOGY, TOPOLOGY_ID) + CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, SERVICE_R1_R2_NAME, DEVICE_R2, DEVICE_R2_ID, + SERVICE_R1_R2, SERVICE_R1_R2_ID, TOPOLOGY, TOPOLOGY_ID) -def grpc_service(context_client_grpc : ContextClient) -> None: +@pytest.mark.depends(on=['context/tests/test_link.py::test_link']) +def test_service(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- #events_collector = EventsCollector( - # context_client_grpc, log_events_received=True, + # context_client, log_events_received=True, # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, # activate_link_collector = False, activate_service_collector = True, activate_slice_collector = False, # activate_connection_collector = False) #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID + context_client.SetContext(Context(**CONTEXT)) + context_client.SetTopology(Topology(**TOPOLOGY)) + context_client.SetDevice(Device(**DEVICE_R1)) + context_client.SetDevice(Device(**DEVICE_R2)) # events = events_collector.get_events(block=True, count=4) # assert isinstance(events[0], ContextEvent) # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + # assert events[0].context_id.context_uuid.uuid == context_uuid # assert isinstance(events[1], TopologyEvent) # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + # assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + # assert events[1].topology_id.topology_uuid.uuid == topology_uuid # assert isinstance(events[2], DeviceEvent) # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID + # assert events[2].device_id.device_uuid.uuid == device_r1_uuid # assert isinstance(events[3], DeviceEvent) # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID + # assert events[3].device_id.device_uuid.uuid == device_r2_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- + service_id = ServiceId(**SERVICE_R1_R2_ID) + context_uuid,service_uuid = service_get_uuid(service_id, allow_random=False) with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) + context_client.GetService(service_id) assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID) + MSG = 'Service({:s}/{:s}) not found; context_uuid generated was: {:s}; service_uuid generated was: {:s}' + assert e.value.details() == MSG.format(CONTEXT_NAME, SERVICE_R1_R2_NAME, context_uuid, service_uuid) # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + response = context_client.GetContext(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 1 assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + response = context_client.ListServiceIds(ContextId(**CONTEXT_ID)) assert len(response.service_ids) == 0 - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + response = context_client.ListServices(ContextId(**CONTEXT_ID)) assert len(response.services) == 0 # ----- Create the object ------------------------------------------------------------------------------------------ with pytest.raises(grpc.RpcError) as e: + WRONG_UUID = 'ffffffff-ffff-ffff-ffff-ffffffffffff' WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2) - WRONG_SERVICE['service_endpoint_ids'][0]\ - ['topology_id']['context_id']['context_uuid']['uuid'] = 'ca1ea172-728f-441d-972c-feeae8c9bffc' - context_client_grpc.SetService(Service(**WRONG_SERVICE)) + WRONG_SERVICE['service_endpoint_ids'][0]['topology_id']['context_id']['context_uuid']['uuid'] = WRONG_UUID + context_client.SetService(Service(**WRONG_SERVICE)) assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(ca1ea172-728f-441d-972c-feeae8c9bffc) is invalid; '\ - 'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg + MSG = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.service_id.context_id.context_uuid.uuid({})' + raw_context_uuid = service_id.context_id.context_uuid.uuid # pylint: disable=no-member + assert e.value.details() == MSG.format(WRONG_UUID, raw_context_uuid) - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + response = context_client.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.service_uuid.uuid == service_uuid # ----- Check create event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, ServiceEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + #assert event.service_id.context_id.context_uuid.uuid == context_uuid + #assert event.service_id.service_uuid.uuid == service_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.name == '' + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.name == CONTEXT_NAME assert len(response.topology_ids) == 1 assert len(response.service_ids) == 1 - assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.service_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.service_ids[0].service_uuid.uuid == service_uuid assert len(response.slice_ids) == 0 - response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) - assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - assert response.name == '' + response = context_client.GetService(ServiceId(**SERVICE_R1_R2_ID)) + assert response.service_id.context_id.context_uuid.uuid == context_uuid + assert response.service_id.service_uuid.uuid == service_uuid + assert response.name == SERVICE_R1_R2_NAME assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM assert len(response.service_endpoint_ids) == 2 assert len(response.service_constraints) == 2 @@ -122,106 +119,108 @@ def grpc_service(context_client_grpc : ContextClient) -> None: assert len(response.service_config.config_rules) == 3 # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + response = context_client.ListServiceIds(ContextId(**CONTEXT_ID)) assert len(response.service_ids) == 1 - assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.service_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.service_ids[0].service_uuid.uuid == service_uuid - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + response = context_client.ListServices(ContextId(**CONTEXT_ID)) assert len(response.services) == 1 - assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - assert response.services[0].name == '' - assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM - assert len(response.service_endpoint_ids) == 2 - assert len(response.service_constraints) == 2 - assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED - assert len(response.service_config.config_rules) == 3 + assert response.services[0].service_id.context_id.context_uuid.uuid == context_uuid + assert response.services[0].service_id.service_uuid.uuid == service_uuid + assert response.services[0].name == SERVICE_R1_R2_NAME + assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM + assert len(response.services[0].service_endpoint_ids) == 2 + assert len(response.services[0].service_constraints) == 2 + assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert len(response.services[0].service_config.config_rules) == 3 # ----- Update the object ------------------------------------------------------------------------------------------ - new_service_name = 'svc:r1-r2' + new_service_name = 'new' SERVICE_UPDATED = copy.deepcopy(SERVICE_R1_R2) SERVICE_UPDATED['name'] = new_service_name - response = context_client_grpc.SetService(Service(**SERVICE_UPDATED)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID + SERVICE_UPDATED['service_status']['service_status'] = ServiceStatusEnum.SERVICESTATUS_ACTIVE + response = context_client.SetService(Service(**SERVICE_UPDATED)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.service_uuid.uuid == service_uuid # ----- Check update event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, ServiceEvent) #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + #assert event.service_id.context_id.context_uuid.uuid == context_uuid + #assert event.service_id.service_uuid.uuid == service_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- - response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) - assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + response = context_client.GetService(ServiceId(**SERVICE_R1_R2_ID)) + assert response.service_id.context_id.context_uuid.uuid == context_uuid + assert response.service_id.service_uuid.uuid == service_uuid assert response.name == new_service_name assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM assert len(response.service_endpoint_ids) == 2 assert len(response.service_constraints) == 2 - assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE assert len(response.service_config.config_rules) == 3 # ----- List when the object is modified --------------------------------------------------------------------------- - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + response = context_client.ListServiceIds(ContextId(**CONTEXT_ID)) assert len(response.service_ids) == 1 - assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.service_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.service_ids[0].service_uuid.uuid == service_uuid - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + response = context_client.ListServices(ContextId(**CONTEXT_ID)) assert len(response.services) == 1 - assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + assert response.services[0].service_id.context_id.context_uuid.uuid == context_uuid + assert response.services[0].service_id.service_uuid.uuid == service_uuid assert response.services[0].name == new_service_name assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM assert len(response.services[0].service_endpoint_ids) == 2 assert len(response.services[0].service_constraints) == 2 - assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED + assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE assert len(response.services[0].service_config.config_rules) == 3 # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) + context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- #event = events_collector.get_event(block=True) #assert isinstance(event, ServiceEvent) - #assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.service_id.context_id.context_uuid.uuid == context_uuid + #assert event.service_id.service_uuid.uuid == service_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) + response = context_client.GetContext(ContextId(**CONTEXT_ID)) assert len(response.topology_ids) == 1 assert len(response.service_ids) == 0 assert len(response.slice_ids) == 0 - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) + response = context_client.ListServiceIds(ContextId(**CONTEXT_ID)) assert len(response.service_ids) == 0 - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) + response = context_client.ListServices(ContextId(**CONTEXT_ID)) assert len(response.services) == 0 # ----- Clean dependencies used in the test and capture related events --------------------------------------------- - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client.RemoveContext(ContextId(**CONTEXT_ID)) #events = events_collector.get_events(block=True, count=4) #assert isinstance(events[0], DeviceEvent) #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID + #assert events[0].device_id.device_uuid.uuid == device_r1_uuid #assert isinstance(events[1], DeviceEvent) #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[1].device_id.device_uuid.uuid == DEVICE_R2_UUID + #assert events[1].device_id.device_uuid.uuid == device_r2_uuid #assert isinstance(events[2], TopologyEvent) #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[2].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - #assert events[2].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID + #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[2].topology_id.topology_uuid.uuid == topology_uuid #assert isinstance(events[3], ContextEvent) #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[3].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID + #assert events[3].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- #events_collector.stop() diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 51b224007..23e73edc8 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -31,8 +31,7 @@ def test_topology(context_client : ContextClient) -> None: #events_collector.start() # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client.SetContext(Context(**CONTEXT)) - context_uuid = response.context_uuid.uuid + context_client.SetContext(Context(**CONTEXT)) # event = events_collector.get_event(block=True) # assert isinstance(event, ContextEvent) diff --git a/test-context.sh b/test-context.sh index 79a9d5653..47d81817b 100755 --- a/test-context.sh +++ b/test-context.sh @@ -41,11 +41,12 @@ export PYTHONPATH=/home/tfs/tfs-ctrl/src # Run unitary tests and analyze coverage of code at same time # helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ - context/tests/test_hasher.py \ - context/tests/test_context.py \ + context/tests/test_hasher.py \ + context/tests/test_context.py \ context/tests/test_topology.py \ - context/tests/test_device.py \ - context/tests/test_link.py + context/tests/test_device.py \ + context/tests/test_link.py \ + context/tests/test_service.py echo echo "Coverage report:" -- GitLab From 763397eb9f5837ae1b0b13e697d0d10cdd873366 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Sat, 7 Jan 2023 04:02:40 +0100 Subject: [PATCH 026/158] Compute component: - Implemented Debug API - Implementing unitary debug API tests --- src/compute/service/__main__.py | 2 + .../nbi_plugins/debug_api/Resources.py | 158 ++++++++++++ .../nbi_plugins/debug_api/__init__.py | 65 +++++ src/compute/tests/MockService_Dependencies.py | 6 +- src/compute/tests/PrepareTestScenario.py | 2 + src/compute/tests/test_debug_api.py | 228 ++++++++++++++++++ 6 files changed, 458 insertions(+), 3 deletions(-) create mode 100644 src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py create mode 100644 src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py create mode 100644 src/compute/tests/test_debug_api.py diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py index e80681e17..71db89c65 100644 --- a/src/compute/service/__main__.py +++ b/src/compute/service/__main__.py @@ -20,6 +20,7 @@ from common.Settings import ( wait_for_environment_variables) from .ComputeService import ComputeService from .rest_server.RestServer import RestServer +from .rest_server.nbi_plugins.debug_api import register_debug_api from .rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn terminate = threading.Event() @@ -57,6 +58,7 @@ def main(): grpc_service.start() rest_server = RestServer() + register_debug_api(rest_server) register_ietf_l2vpn(rest_server) rest_server.start() diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py new file mode 100644 index 000000000..a701fd563 --- /dev/null +++ b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py @@ -0,0 +1,158 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from flask.json import jsonify +from flask_restful import Resource +from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId +from common.proto.policy_pb2 import PolicyRuleId +from common.tools.grpc.Tools import grpc_message_to_json +from common.tools.object_factory.Connection import json_connection_id +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.PolicyRule import json_policy_rule_id +from common.tools.object_factory.Service import json_service_id +from common.tools.object_factory.Slice import json_slice_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient + + +def format_grpc_to_json(grpc_reply): + return jsonify(grpc_message_to_json(grpc_reply)) + +def grpc_connection_id(connection_uuid): + return ConnectionId(**json_connection_id(connection_uuid)) + +def grpc_context_id(context_uuid): + return ContextId(**json_context_id(context_uuid)) + +def grpc_device_id(device_uuid): + return DeviceId(**json_device_id(device_uuid)) + +def grpc_link_id(link_uuid): + return LinkId(**json_link_id(link_uuid)) + +def grpc_service_id(context_uuid, service_uuid): + return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid))) + +def grpc_slice_id(context_uuid, slice_uuid): + return SliceId(**json_slice_id(slice_uuid, context_id=json_context_id(context_uuid))) + +def grpc_topology_id(context_uuid, topology_uuid): + return TopologyId(**json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))) + +def grpc_policy_rule_id(policy_rule_uuid): + return PolicyRuleId(**json_policy_rule_id(policy_rule_uuid)) + + +class _Resource(Resource): + def __init__(self) -> None: + super().__init__() + self.client = ContextClient() + +class ContextIds(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListContextIds(Empty())) + +class Contexts(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListContexts(Empty())) + +class Context(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.client.GetContext(grpc_context_id(context_uuid))) + +class TopologyIds(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.client.ListTopologyIds(grpc_context_id(context_uuid))) + +class Topologies(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.client.ListTopologies(grpc_context_id(context_uuid))) + +class Topology(_Resource): + def get(self, context_uuid : str, topology_uuid : str): + return format_grpc_to_json(self.client.GetTopology(grpc_topology_id(context_uuid, topology_uuid))) + +class ServiceIds(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.client.ListServiceIds(grpc_context_id(context_uuid))) + +class Services(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.client.ListServices(grpc_context_id(context_uuid))) + +class Service(_Resource): + def get(self, context_uuid : str, service_uuid : str): + return format_grpc_to_json(self.client.GetService(grpc_service_id(context_uuid, service_uuid))) + +class SliceIds(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.client.ListSliceIds(grpc_context_id(context_uuid))) + +class Slices(_Resource): + def get(self, context_uuid : str): + return format_grpc_to_json(self.client.ListSlices(grpc_context_id(context_uuid))) + +class Slice(_Resource): + def get(self, context_uuid : str, slice_uuid : str): + return format_grpc_to_json(self.client.GetSlice(grpc_slice_id(context_uuid, slice_uuid))) + +class DeviceIds(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListDeviceIds(Empty())) + +class Devices(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListDevices(Empty())) + +class Device(_Resource): + def get(self, device_uuid : str): + return format_grpc_to_json(self.client.GetDevice(grpc_device_id(device_uuid))) + +class LinkIds(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListLinkIds(Empty())) + +class Links(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListLinks(Empty())) + +class Link(_Resource): + def get(self, link_uuid : str): + return format_grpc_to_json(self.client.GetLink(grpc_link_id(link_uuid))) + +class ConnectionIds(_Resource): + def get(self, context_uuid : str, service_uuid : str): + return format_grpc_to_json(self.client.ListConnectionIds(grpc_service_id(context_uuid, service_uuid))) + +class Connections(_Resource): + def get(self, context_uuid : str, service_uuid : str): + return format_grpc_to_json(self.client.ListConnections(grpc_service_id(context_uuid, service_uuid))) + +class Connection(_Resource): + def get(self, connection_uuid : str): + return format_grpc_to_json(self.client.GetConnection(grpc_connection_id(connection_uuid))) + +class PolicyRuleIds(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListPolicyRuleIds(Empty())) + +class PolicyRules(_Resource): + def get(self): + return format_grpc_to_json(self.client.ListPolicyRules(Empty())) + +class PolicyRule(_Resource): + def get(self, policy_rule_uuid : str): + return format_grpc_to_json(self.client.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid))) diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py b/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py new file mode 100644 index 000000000..4fca3b534 --- /dev/null +++ b/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# RFC 8466 - L2VPN Service Model (L2SM) +# Ref: https://datatracker.ietf.org/doc/html/rfc8466 + +from compute.service.rest_server.RestServer import RestServer +from .Resources import ( + Connection, ConnectionIds, Connections, Context, ContextIds, Contexts, Device, DeviceIds, Devices, Link, LinkIds, + Links, PolicyRule, PolicyRuleIds, PolicyRules, Service, ServiceIds, Services, Slice, SliceIds, Slices, Topologies, + Topology, TopologyIds) + +URL_PREFIX = '/api' + +# Use 'path' type in Service and Sink because service_uuid and link_uuid might contain char '/' and Flask is unable to +# recognize them in 'string' type. +RESOURCES = [ + # (endpoint_name, resource_class, resource_url) + ('api.context_ids', ContextIds, '/context_ids'), + ('api.contexts', Contexts, '/contexts'), + ('api.context', Context, '/context/<string:context_uuid>'), + + ('api.topology_ids', TopologyIds, '/context/<string:context_uuid>/topology_ids'), + ('api.topologies', Topologies, '/context/<string:context_uuid>/topologies'), + ('api.topology', Topology, '/context/<string:context_uuid>/topology/<string:topology_uuid>'), + + ('api.service_ids', ServiceIds, '/context/<string:context_uuid>/service_ids'), + ('api.services', Services, '/context/<string:context_uuid>/services'), + ('api.service', Service, '/context/<string:context_uuid>/service/<path:service_uuid>'), + + ('api.slice_ids', SliceIds, '/context/<string:context_uuid>/slice_ids'), + ('api.slices', Slices, '/context/<string:context_uuid>/slices'), + ('api.slice', Slice, '/context/<string:context_uuid>/slice/<path:slice_uuid>'), + + ('api.device_ids', DeviceIds, '/device_ids'), + ('api.devices', Devices, '/devices'), + ('api.device', Device, '/device/<string:device_uuid>'), + + ('api.link_ids', LinkIds, '/link_ids'), + ('api.links', Links, '/links'), + ('api.link', Link, '/link/<path:link_uuid>'), + + ('api.connection_ids', ConnectionIds, '/context/<string:context_uuid>/service/<path:service_uuid>/connection_ids'), + ('api.connections', Connections, '/context/<string:context_uuid>/service/<path:service_uuid>/connections'), + ('api.connection', Connection, '/connection/<path:connection_uuid>'), + + ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'), + ('api.policyrules', PolicyRules, '/policyrules'), + ('api.policyrule', PolicyRule, '/policyrule/<string:policyrule_uuid>'), +] + +def register_debug_api(rest_server : RestServer): + for endpoint_name, resource_class, resource_url in RESOURCES: + rest_server.add_resource(resource_class, URL_PREFIX + resource_url, endpoint=endpoint_name) diff --git a/src/compute/tests/MockService_Dependencies.py b/src/compute/tests/MockService_Dependencies.py index 5ed9d4da9..fbc4bd1a4 100644 --- a/src/compute/tests/MockService_Dependencies.py +++ b/src/compute/tests/MockService_Dependencies.py @@ -28,7 +28,7 @@ LOCAL_HOST = '127.0.0.1' SERVICE_CONTEXT = ServiceNameEnum.CONTEXT SERVICE_SERVICE = ServiceNameEnum.SERVICE -SERVICE_SLICE = ServiceNameEnum.SLICE +SERVICE_SLICE = ServiceNameEnum.SLICE class MockService_Dependencies(GenericGrpcService): # Mock Service implementing Context, Service and Slice to simplify unitary tests of Compute @@ -54,5 +54,5 @@ class MockService_Dependencies(GenericGrpcService): os.environ[get_env_var_name(SERVICE_SERVICE, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address) os.environ[get_env_var_name(SERVICE_SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port) - os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address) - os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port) + os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address) + os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port) diff --git a/src/compute/tests/PrepareTestScenario.py b/src/compute/tests/PrepareTestScenario.py index 06fb34f9e..7ef99f4b1 100644 --- a/src/compute/tests/PrepareTestScenario.py +++ b/src/compute/tests/PrepareTestScenario.py @@ -17,6 +17,7 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, get_service_port_http) from compute.service.rest_server.RestServer import RestServer +from compute.service.rest_server.nbi_plugins.debug_api import register_debug_api from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn from compute.tests.MockService_Dependencies import MockService_Dependencies from tests.tools.mock_osm.MockOSM import MockOSM @@ -39,6 +40,7 @@ def mock_service(): @pytest.fixture(scope='session') def compute_service_rest(mock_service): # pylint: disable=redefined-outer-name _rest_server = RestServer() + register_debug_api(_rest_server) register_ietf_l2vpn(_rest_server) _rest_server.start() time.sleep(1) # bring time for the server to start diff --git a/src/compute/tests/test_debug_api.py b/src/compute/tests/test_debug_api.py new file mode 100644 index 000000000..31d204965 --- /dev/null +++ b/src/compute/tests/test_debug_api.py @@ -0,0 +1,228 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os, pytest, requests, time, urllib +from typing import Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum +from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology +from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, + get_service_baseurl_http, get_service_port_grpc, get_service_port_http) +from common.type_checkers.Assertions import ( + validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids, + validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids, + validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology, + validate_topology_ids) +from context.client.ContextClient import ContextClient +from .MockService_Dependencies import MockService_Dependencies +from .Objects import ( + CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, + DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2, + LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, + SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, SLICE_R1_R3, TOPOLOGY, + TOPOLOGY_ID, POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID) + + +@pytest.fixture(scope='session') +def mock_service(): + _service = MockService_Dependencies(MOCKSERVICE_PORT) + _service.configure_env_vars() + _service.start() + yield _service + _service.stop() + + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +LOCAL_HOST = '127.0.0.1' +GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports +HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT)) # avoid privileged ports + +MOCKSERVICE_PORT = 10000 +DEVICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports + +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT) + +@pytest.fixture(scope='session') +def context_service_grpc(): + _service = ContextService(context_s_mb[0], context_s_mb[1]) + _service.start() + yield _service + _service.stop() + +@pytest.fixture(scope='session') +def context_service_rest(): + database = context_db_mb[0] + _rest_server = RestServer() + for endpoint_name, resource_class, resource_url in RESOURCES: + _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) + _rest_server.start() + time.sleep(1) # bring time for the server to start + yield _rest_server + _rest_server.shutdown() + _rest_server.join() + +@pytest.fixture(scope='session') +def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name + _client = ContextClient() + yield _client + _client.close() + +def test_populate_database(): + client = ContextClient(host=LOCAL_HOST, port=GRPC_PORT) + client.SetContext(Context(**CONTEXT)) + client.SetTopology(Topology(**TOPOLOGY)) + client.SetDevice(Device(**DEVICE_R1)) + client.SetDevice(Device(**DEVICE_R2)) + client.SetDevice(Device(**DEVICE_R3)) + client.SetLink(Link(**LINK_R1_R2)) + client.SetLink(Link(**LINK_R1_R3)) + client.SetLink(Link(**LINK_R2_R3)) + client.SetService(Service(**SERVICE_R1_R2)) + client.SetService(Service(**SERVICE_R1_R3)) + client.SetService(Service(**SERVICE_R2_R3)) + client.SetSlice(Slice(**SLICE_R1_R3)) + client.SetConnection(Connection(**CONNECTION_R1_R3)) + +def do_rest_request(url : str): + base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) + request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) + LOGGER.warning('Request: GET {:s}'.format(str(request_url))) + reply = requests.get(request_url) + LOGGER.warning('Reply: {:s}'.format(str(reply.text))) + assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) + return reply.json() + + +def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/context_ids') + validate_context_ids(reply) + +def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/contexts') + validate_contexts(reply) + +def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}'.format(context_uuid)) + validate_context(reply) + +def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid)) + validate_topology_ids(reply) + +def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid)) + validate_topologies(reply) + +def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID) + reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid)) + validate_topology(reply, num_devices=3, num_links=3) + +def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid)) + validate_service_ids(reply) + +def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/services'.format(context_uuid)) + validate_services(reply) + +def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='') + reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) + validate_service(reply) + +def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid)) + #validate_slice_ids(reply) + +def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slices'.format(context_uuid)) + #validate_slices(reply) + +def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + slice_uuid = urllib.parse.quote(SLICE_R1_R3_UUID, safe='') + reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid)) + #validate_slice(reply) + +def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/device_ids') + validate_device_ids(reply) + +def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/devices') + validate_devices(reply) + +def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='') + reply = do_rest_request('/device/{:s}'.format(device_uuid)) + validate_device(reply) + +def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/link_ids') + validate_link_ids(reply) + +def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/links') + validate_links(reply) + +def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='') + reply = do_rest_request('/link/{:s}'.format(link_uuid)) + validate_link(reply) + +def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') + reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid)) + validate_connection_ids(reply) + +def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') + reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid)) + validate_connections(reply) + +def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='') + reply = do_rest_request('/connection/{:s}'.format(connection_uuid)) + validate_connection(reply) + +def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrule_ids') + #validate_policyrule_ids(reply) + +def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrules') + #validate_policyrules(reply) + +def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='') + reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid)) + #validate_policyrule(reply) -- GitLab From d2316472d8e646d9d16e9b780c4eaadb7115403d Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Sat, 7 Jan 2023 04:06:18 +0100 Subject: [PATCH 027/158] Context component: - removed old code - implemented basic support for Slice entity - implemented detailed perf eval report in unitary tests --- .../service/ContextServiceServicerImpl.py | 198 +-- src/context/service/_old_code/Config.py | 16 - src/context/service/_old_code/Populate.py | 49 - src/context/service/_old_code/Resources.py | 246 --- src/context/service/_old_code/__init__.py | 14 - src/context/service/_old_code/__main__.py | 85 - .../service/_old_code/_test_restapi.py | 31 - src/context/service/_old_code/test_unitary.py | 1450 ----------------- src/context/service/database/Context.py | 3 +- src/context/service/database/Device.py | 7 +- src/context/service/database/Link.py | 5 +- src/context/service/database/Service.py | 7 +- src/context/service/database/Slice.py | 216 +++ src/context/service/database/Topology.py | 4 +- .../database/models/ConfigRuleModel.py | 4 +- .../database/models/ConstraintModel.py | 4 +- .../service/database/models/ContextModel.py | 4 +- .../service/database/models/RelationModels.py | 36 +- .../service/database/models/SliceModel.py | 155 +- .../models/enums/SliceStatus.py} | 20 +- src/context/service/database/uuids/Slice.py | 37 + src/context/tests/Objects.py | 33 +- src/context/tests/__test_unitary.py | 55 - src/context/tests/_test_slice.py | 0 src/context/tests/conftest.py | 39 +- src/context/tests/test_slice.py | 272 ++++ test-context.sh | 3 +- 27 files changed, 728 insertions(+), 2265 deletions(-) delete mode 100644 src/context/service/_old_code/Config.py delete mode 100644 src/context/service/_old_code/Populate.py delete mode 100644 src/context/service/_old_code/Resources.py delete mode 100644 src/context/service/_old_code/__init__.py delete mode 100644 src/context/service/_old_code/__main__.py delete mode 100644 src/context/service/_old_code/_test_restapi.py delete mode 100644 src/context/service/_old_code/test_unitary.py create mode 100644 src/context/service/database/Slice.py rename src/context/service/{_old_code/RestServer.py => database/models/enums/SliceStatus.py} (54%) create mode 100644 src/context/service/database/uuids/Slice.py delete mode 100644 src/context/tests/__test_unitary.py delete mode 100644 src/context/tests/_test_slice.py create mode 100644 src/context/tests/test_slice.py diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index edb5095b9..d93a8f059 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -18,7 +18,6 @@ import grpc, json, logging, sqlalchemy #from sqlalchemy.dialects.postgresql import UUID, insert from typing import Iterator from common.message_broker.MessageBroker import MessageBroker -#from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import ( Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, Context, ContextEvent, ContextId, ContextIdList, ContextList, @@ -39,6 +38,7 @@ from .database.Context import context_delete, context_get, context_list_ids, con from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set +from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset from .database.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set #from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string #from context.service.Database import Database @@ -265,180 +265,38 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Slice ---------------------------------------------------------------------------------------------------- -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList: -# with self.lock: -# db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) -# db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) -# db_slices = sorted(db_slices, key=operator.attrgetter('pk')) -# return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices]) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList: + return slice_list_ids(self.db_engine, request) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList: -# with self.lock: -# db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid) -# db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel) -# db_slices = sorted(db_slices, key=operator.attrgetter('pk')) -# return SliceList(slices=[db_slice.dump() for db_slice in db_slices]) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList: + return slice_list_objs(self.db_engine, request) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice: -# with self.lock: -# str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid]) -# db_slice : SliceModel = get_object(self.database, SliceModel, str_key) -# return Slice(**db_slice.dump( -# include_endpoint_ids=True, include_constraints=True, include_config_rules=True, -# include_service_ids=True, include_subslice_ids=True)) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice: + return slice_get(self.db_engine, request) -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: -# with self.lock: -# context_uuid = request.slice_id.context_id.context_uuid.uuid -# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) -# -# for i,endpoint_id in enumerate(request.slice_endpoint_ids): -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: -# raise InvalidArgumentException( -# 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), -# endpoint_topology_context_uuid, -# ['should be == {:s}({:s})'.format( -# 'request.slice_id.context_id.context_uuid.uuid', context_uuid)]) -# -# slice_uuid = request.slice_id.slice_uuid.uuid -# str_slice_key = key_to_str([context_uuid, slice_uuid]) -# -# constraints_result = set_constraints( -# self.database, str_slice_key, 'slice', request.slice_constraints) -# db_constraints = constraints_result[0][0] -# -# running_config_rules = update_config( -# self.database, str_slice_key, 'slice', request.slice_config.config_rules) -# db_running_config = running_config_rules[0][0] -# -# result : Tuple[SliceModel, bool] = update_or_create_object(self.database, SliceModel, str_slice_key, { -# 'context_fk' : db_context, -# 'slice_uuid' : slice_uuid, -# 'slice_constraints_fk': db_constraints, -# 'slice_status' : grpc_to_enum__slice_status(request.slice_status.slice_status), -# 'slice_config_fk' : db_running_config, -# 'slice_owner_uuid' : request.slice_owner.owner_uuid.uuid, -# 'slice_owner_string' : request.slice_owner.owner_string, -# }) -# db_slice, updated = result -# -# for i,endpoint_id in enumerate(request.slice_endpoint_ids): -# endpoint_uuid = endpoint_id.endpoint_uuid.uuid -# endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid -# endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# -# str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) -# if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: -# str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) -# str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') -# -# db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) -# -# str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--') -# result : Tuple[SliceEndPointModel, bool] = get_or_create_object( -# self.database, SliceEndPointModel, str_slice_endpoint_key, { -# 'slice_fk': db_slice, 'endpoint_fk': db_endpoint}) -# #db_slice_endpoint, slice_endpoint_created = result -# -# for i,service_id in enumerate(request.slice_service_ids): -# service_uuid = service_id.service_uuid.uuid -# service_context_uuid = service_id.context_id.context_uuid.uuid -# str_service_key = key_to_str([service_context_uuid, service_uuid]) -# db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key) -# -# str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--') -# result : Tuple[SliceServiceModel, bool] = get_or_create_object( -# self.database, SliceServiceModel, str_slice_service_key, { -# 'slice_fk': db_slice, 'service_fk': db_service}) -# #db_slice_service, slice_service_created = result -# -# for i,subslice_id in enumerate(request.slice_subslice_ids): -# subslice_uuid = subslice_id.slice_uuid.uuid -# subslice_context_uuid = subslice_id.context_id.context_uuid.uuid -# str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid]) -# db_subslice : SliceModel = get_object(self.database, SliceModel, str_subslice_key) -# -# str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--') -# result : Tuple[SliceSubSliceModel, bool] = get_or_create_object( -# self.database, SliceSubSliceModel, str_slice_subslice_key, { -# 'slice_fk': db_slice, 'sub_slice_fk': db_subslice}) -# #db_slice_subslice, slice_subslice_created = result -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_slice_id = db_slice.dump_id() -# notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) -# return SliceId(**dict_slice_id) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: + slice_id,updated = slice_set(self.db_engine, request) + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id}) + return slice_id -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: -# with self.lock: -# context_uuid = request.slice_id.context_id.context_uuid.uuid -# db_context : ContextModel = get_object(self.database, ContextModel, context_uuid) -# -# for i,endpoint_id in enumerate(request.slice_endpoint_ids): -# endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid -# if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid: -# raise InvalidArgumentException( -# 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), -# endpoint_topology_context_uuid, -# ['should be == {:s}({:s})'.format( -# 'request.slice_id.context_id.context_uuid.uuid', context_uuid)]) -# -# slice_uuid = request.slice_id.slice_uuid.uuid -# str_slice_key = key_to_str([context_uuid, slice_uuid]) -# -# if len(request.slice_constraints) > 0: -# raise NotImplementedError('UnsetSlice: removal of constraints') -# if len(request.slice_config.config_rules) > 0: -# raise NotImplementedError('UnsetSlice: removal of config rules') -# if len(request.slice_endpoint_ids) > 0: -# raise NotImplementedError('UnsetSlice: removal of endpoints') -# -# updated = False -# -# for service_id in request.slice_service_ids: -# service_uuid = service_id.service_uuid.uuid -# service_context_uuid = service_id.context_id.context_uuid.uuid -# str_service_key = key_to_str([service_context_uuid, service_uuid]) -# str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--') -# SliceServiceModel(self.database, str_slice_service_key).delete() -# updated = True -# -# for subslice_id in request.slice_subslice_ids: -# subslice_uuid = subslice_id.slice_uuid.uuid -# subslice_context_uuid = subslice_id.context_id.context_uuid.uuid -# str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid]) -# str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--') -# SliceSubSliceModel(self.database, str_slice_subslice_key).delete() -# updated = True -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# db_slice : SliceModel = get_object(self.database, SliceModel, str_slice_key) -# dict_slice_id = db_slice.dump_id() -# notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) -# return SliceId(**dict_slice_id) + @safe_and_metered_rpc_method(METRICS, LOGGER) + def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: + slice_id,updated = slice_unset(self.db_engine, request) + #if updated: + # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_UPDATE, {'slice_id': slice_id}) + return slice_id -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: -# with self.lock: -# context_uuid = request.context_id.context_uuid.uuid -# slice_uuid = request.slice_uuid.uuid -# db_slice = SliceModel(self.database, key_to_str([context_uuid, slice_uuid]), auto_load=False) -# found = db_slice.load() -# if not found: return Empty() -# -# dict_slice_id = db_slice.dump_id() -# db_slice.delete() -# -# event_type = EventTypeEnum.EVENTTYPE_REMOVE -# notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id}) -# return Empty() + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: + deleted = slice_delete(self.db_engine, request) + #if deleted: + # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_REMOVE, {'slice_id': request}) + return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: diff --git a/src/context/service/_old_code/Config.py b/src/context/service/_old_code/Config.py deleted file mode 100644 index 6f5d1dc0b..000000000 --- a/src/context/service/_old_code/Config.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Autopopulate the component with fake data for testing purposes? -POPULATE_FAKE_DATA = False diff --git a/src/context/service/_old_code/Populate.py b/src/context/service/_old_code/Populate.py deleted file mode 100644 index ffb739988..000000000 --- a/src/context/service/_old_code/Populate.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Topology -from context.client.ContextClient import ContextClient -from context.tests.Objects import ( - CONNECTION_R1_R3, CONTEXT, TOPOLOGY, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, - LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R3, - SERVICE_R2_R3) - -def populate(host=None, port=None): - client = ContextClient(host=host, port=port) - - client.SetContext(Context(**CONTEXT)) - client.SetTopology(Topology(**TOPOLOGY)) - client.SetDevice(Device(**DEVICE_R1)) - client.SetDevice(Device(**DEVICE_R2)) - client.SetDevice(Device(**DEVICE_R3)) - - client.SetLink(Link(**LINK_R1_R2)) - client.SetLink(Link(**LINK_R1_R3)) - client.SetLink(Link(**LINK_R2_R3)) - - TOPOLOGY_WITH_DEVICES_AND_LINKS = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_DEVICES_AND_LINKS['device_ids'].append(DEVICE_R1_ID) - TOPOLOGY_WITH_DEVICES_AND_LINKS['device_ids'].append(DEVICE_R2_ID) - TOPOLOGY_WITH_DEVICES_AND_LINKS['device_ids'].append(DEVICE_R3_ID) - TOPOLOGY_WITH_DEVICES_AND_LINKS['link_ids'].append(LINK_R1_R2_ID) - TOPOLOGY_WITH_DEVICES_AND_LINKS['link_ids'].append(LINK_R1_R3_ID) - TOPOLOGY_WITH_DEVICES_AND_LINKS['link_ids'].append(LINK_R2_R3_ID) - client.SetTopology(Topology(**TOPOLOGY_WITH_DEVICES_AND_LINKS)) - - client.SetService(Service(**SERVICE_R1_R2)) - client.SetService(Service(**SERVICE_R2_R3)) - - client.SetService(Service(**SERVICE_R1_R3)) - client.SetConnection(Connection(**CONNECTION_R1_R3)) diff --git a/src/context/service/_old_code/Resources.py b/src/context/service/_old_code/Resources.py deleted file mode 100644 index 5f03132a3..000000000 --- a/src/context/service/_old_code/Resources.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from flask import make_response -from flask.json import jsonify -from flask_restful import Resource -from common.orm.Database import Database -from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId -from common.proto.policy_pb2 import PolicyRuleId -from common.tools.grpc.Tools import grpc_message_to_json -from context.service.grpc_server.ContextServiceServicerImpl import ContextServiceServicerImpl - -def format_grpc_to_json(grpc_reply): - return jsonify(grpc_message_to_json(grpc_reply)) - -def grpc_connection_id(connection_uuid): - return ConnectionId(**{ - 'connection_uuid': {'uuid': connection_uuid} - }) - -def grpc_context_id(context_uuid): - return ContextId(**{ - 'context_uuid': {'uuid': context_uuid} - }) - -def grpc_device_id(device_uuid): - return DeviceId(**{ - 'device_uuid': {'uuid': device_uuid} - }) - -def grpc_link_id(link_uuid): - return LinkId(**{ - 'link_uuid': {'uuid': link_uuid} - }) - -def grpc_service_id(context_uuid, service_uuid): - return ServiceId(**{ - 'context_id': {'context_uuid': {'uuid': context_uuid}}, - 'service_uuid': {'uuid': service_uuid} - }) - -def grpc_slice_id(context_uuid, slice_uuid): - return SliceId(**{ - 'context_id': {'context_uuid': {'uuid': context_uuid}}, - 'slice_uuid': {'uuid': slice_uuid} - }) - -def grpc_topology_id(context_uuid, topology_uuid): - return TopologyId(**{ - 'context_id': {'context_uuid': {'uuid': context_uuid}}, - 'topology_uuid': {'uuid': topology_uuid} - }) - -def grpc_policy_rule_id(policy_rule_uuid): - return PolicyRuleId(**{ - 'uuid': {'uuid': policy_rule_uuid} - }) - -class _Resource(Resource): - def __init__(self, database : Database) -> None: - super().__init__() - self.database = database - self.servicer = ContextServiceServicerImpl(self.database, None) - -class ContextIds(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListContextIds(Empty(), None)) - -class Contexts(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListContexts(Empty(), None)) - -class Context(_Resource): - def get(self, context_uuid : str): - return format_grpc_to_json(self.servicer.GetContext(grpc_context_id(context_uuid), None)) - -class TopologyIds(_Resource): - def get(self, context_uuid : str): - return format_grpc_to_json(self.servicer.ListTopologyIds(grpc_context_id(context_uuid), None)) - -class Topologies(_Resource): - def get(self, context_uuid : str): - return format_grpc_to_json(self.servicer.ListTopologies(grpc_context_id(context_uuid), None)) - -class Topology(_Resource): - def get(self, context_uuid : str, topology_uuid : str): - return format_grpc_to_json(self.servicer.GetTopology(grpc_topology_id(context_uuid, topology_uuid), None)) - -class ServiceIds(_Resource): - def get(self, context_uuid : str): - return format_grpc_to_json(self.servicer.ListServiceIds(grpc_context_id(context_uuid), None)) - -class Services(_Resource): - def get(self, context_uuid : str): - return format_grpc_to_json(self.servicer.ListServices(grpc_context_id(context_uuid), None)) - -class Service(_Resource): - def get(self, context_uuid : str, service_uuid : str): - return format_grpc_to_json(self.servicer.GetService(grpc_service_id(context_uuid, service_uuid), None)) - -class SliceIds(_Resource): - def get(self, context_uuid : str): - return format_grpc_to_json(self.servicer.ListSliceIds(grpc_context_id(context_uuid), None)) - -class Slices(_Resource): - def get(self, context_uuid : str): - return format_grpc_to_json(self.servicer.ListSlices(grpc_context_id(context_uuid), None)) - -class Slice(_Resource): - def get(self, context_uuid : str, slice_uuid : str): - return format_grpc_to_json(self.servicer.GetSlice(grpc_slice_id(context_uuid, slice_uuid), None)) - -class DeviceIds(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListDeviceIds(Empty(), None)) - -class Devices(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListDevices(Empty(), None)) - -class Device(_Resource): - def get(self, device_uuid : str): - return format_grpc_to_json(self.servicer.GetDevice(grpc_device_id(device_uuid), None)) - -class LinkIds(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListLinkIds(Empty(), None)) - -class Links(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListLinks(Empty(), None)) - -class Link(_Resource): - def get(self, link_uuid : str): - return format_grpc_to_json(self.servicer.GetLink(grpc_link_id(link_uuid), None)) - -class ConnectionIds(_Resource): - def get(self, context_uuid : str, service_uuid : str): - return format_grpc_to_json(self.servicer.ListConnectionIds(grpc_service_id(context_uuid, service_uuid), None)) - -class Connections(_Resource): - def get(self, context_uuid : str, service_uuid : str): - return format_grpc_to_json(self.servicer.ListConnections(grpc_service_id(context_uuid, service_uuid), None)) - -class Connection(_Resource): - def get(self, connection_uuid : str): - return format_grpc_to_json(self.servicer.GetConnection(grpc_connection_id(connection_uuid), None)) - -class PolicyRuleIds(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListPolicyRuleIds(Empty(), None)) - -class PolicyRules(_Resource): - def get(self): - return format_grpc_to_json(self.servicer.ListPolicyRules(Empty(), None)) - -class PolicyRule(_Resource): - def get(self, policy_rule_uuid : str): - return format_grpc_to_json(self.servicer.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid), None)) - -class DumpText(Resource): - def __init__(self, database : Database) -> None: - super().__init__() - self.database = database - - def get(self): - db_entries = self.database.dump() - num_entries = len(db_entries) - response = ['----- Database Dump [{:3d} entries] -------------------------'.format(num_entries)] - for db_entry in db_entries: - response.append(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - response.append('-----------------------------------------------------------') - headers = {'Content-Type': 'text/plain'} - return make_response('\n'.join(response), 200, headers) - -class DumpHtml(Resource): - def __init__(self, database : Database) -> None: - super().__init__() - self.database = database - - def get(self): - db_entries = self.database.dump() - num_entries = len(db_entries) - response = [] - response.append('<HTML><HEAD><TITLE>Database Dump [{:3d} entries]</TITLE></HEAD><BODY>'.format(num_entries)) - response.append('<H3>Database Dump [{:3d} entries]</H3><HR/>'.format(num_entries)) - response.append('<TABLE border=1>') - response.append('<TR><TH>Type</TH><TH>Key</TH><TH>Value</TH></TR>') - for db_entry in db_entries: - response.append('<TR><TD>{:s}</TD><TD>{:s}</TD><TD>{:s}</TD></TR>'.format(*db_entry)) - response.append('</TABLE></BODY></HTML>') - - headers = {'Content-Type': 'text/html'} - return make_response(''.join(response), 200, headers) - - -# Use 'path' type in Service and Sink because service_uuid and link_uuid might contain char '/' and Flask is unable to -# recognize them in 'string' type. -RESOURCES = [ - # (endpoint_name, resource_class, resource_url) - ('api.context_ids', ContextIds, '/context_ids'), - ('api.contexts', Contexts, '/contexts'), - ('api.context', Context, '/context/<string:context_uuid>'), - - ('api.topology_ids', TopologyIds, '/context/<string:context_uuid>/topology_ids'), - ('api.topologies', Topologies, '/context/<string:context_uuid>/topologies'), - ('api.topology', Topology, '/context/<string:context_uuid>/topology/<string:topology_uuid>'), - - ('api.service_ids', ServiceIds, '/context/<string:context_uuid>/service_ids'), - ('api.services', Services, '/context/<string:context_uuid>/services'), - ('api.service', Service, '/context/<string:context_uuid>/service/<path:service_uuid>'), - - ('api.slice_ids', SliceIds, '/context/<string:context_uuid>/slice_ids'), - ('api.slices', Slices, '/context/<string:context_uuid>/slices'), - ('api.slice', Slice, '/context/<string:context_uuid>/slice/<path:slice_uuid>'), - - ('api.device_ids', DeviceIds, '/device_ids'), - ('api.devices', Devices, '/devices'), - ('api.device', Device, '/device/<string:device_uuid>'), - - ('api.link_ids', LinkIds, '/link_ids'), - ('api.links', Links, '/links'), - ('api.link', Link, '/link/<path:link_uuid>'), - - ('api.connection_ids', ConnectionIds, '/context/<string:context_uuid>/service/<path:service_uuid>/connection_ids'), - ('api.connections', Connections, '/context/<string:context_uuid>/service/<path:service_uuid>/connections'), - ('api.connection', Connection, '/connection/<path:connection_uuid>'), - - ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'), - ('api.policyrules', PolicyRules, '/policyrules'), - ('api.policyrule', PolicyRule, '/policyrule/<string:policyrule_uuid>'), - - ('api.dump.text', DumpText, '/dump/text'), - ('api.dump.html', DumpHtml, '/dump/html'), -] diff --git a/src/context/service/_old_code/__init__.py b/src/context/service/_old_code/__init__.py deleted file mode 100644 index 70a332512..000000000 --- a/src/context/service/_old_code/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/src/context/service/_old_code/__main__.py b/src/context/service/_old_code/__main__.py deleted file mode 100644 index 69d3f5cbe..000000000 --- a/src/context/service/_old_code/__main__.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, signal, sys, threading -from prometheus_client import start_http_server -from common.Settings import get_log_level, get_metrics_port, get_setting -from common.orm.Database import Database -from common.orm.Factory import get_database_backend -from common.message_broker.Factory import get_messagebroker_backend -from common.message_broker.MessageBroker import MessageBroker -from context.service.grpc_server.ContextService import ContextService -from .Config import POPULATE_FAKE_DATA -from .Populate import populate -from .Resources import RESOURCES -from .RestServer import RestServer - -terminate = threading.Event() -LOGGER = None - -def signal_handler(signal, frame): # pylint: disable=redefined-outer-name - LOGGER.warning('Terminate signal received') - terminate.set() - -def main(): - global LOGGER # pylint: disable=global-statement - - log_level = get_log_level() - logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") - LOGGER = logging.getLogger(__name__) - - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - - LOGGER.info('Starting...') - - # Start metrics server - metrics_port = get_metrics_port() - start_http_server(metrics_port) - - # Get database instance - database = Database(get_database_backend()) - - # Get message broker instance - messagebroker = MessageBroker(get_messagebroker_backend()) - - # Starting context service - grpc_service = ContextService(database, messagebroker) - grpc_service.start() - - rest_server = RestServer() - for endpoint_name, resource_class, resource_url in RESOURCES: - rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) - rest_server.start() - - populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA) - if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'}) - if populate_fake_data: - LOGGER.info('Populating fake data...') - populate(host='127.0.0.1', port=grpc_service.bind_port) - LOGGER.info('Fake Data populated') - - # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=0.1): pass - - LOGGER.info('Terminating...') - grpc_service.stop() - rest_server.shutdown() - rest_server.join() - - LOGGER.info('Bye') - return 0 - -if __name__ == '__main__': - sys.exit(main()) diff --git a/src/context/service/_old_code/_test_restapi.py b/src/context/service/_old_code/_test_restapi.py deleted file mode 100644 index 82a8bca40..000000000 --- a/src/context/service/_old_code/_test_restapi.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -#from context.service._old_code.Populate import populate -#from context.service.rest_server.RestServer import RestServer -#from context.service.rest_server.Resources import RESOURCES - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -#def do_rest_request(url : str): -# base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) -# request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) -# LOGGER.warning('Request: GET {:s}'.format(str(request_url))) -# reply = requests.get(request_url) -# LOGGER.warning('Reply: {:s}'.format(str(reply.text))) -# assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) -# return reply.json() - diff --git a/src/context/service/_old_code/test_unitary.py b/src/context/service/_old_code/test_unitary.py deleted file mode 100644 index 5a0dcb9c1..000000000 --- a/src/context/service/_old_code/test_unitary.py +++ /dev/null @@ -1,1450 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# pylint: disable=too-many-lines -import copy, grpc, logging, os, pytest, requests, time, urllib -from typing import Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum -from common.Settings import ( - ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, - get_service_baseurl_http, get_service_port_grpc, get_service_port_http) -from context.service.Database import Database -from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum -from common.message_broker.MessageBroker import MessageBroker -from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, - DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, - ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) -from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) -from common.type_checkers.Assertions import ( - validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids, - validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids, - validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology, - validate_topology_ids) -from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from context.service.database.tools.Tools import ( - FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher) -from context.service.grpc_server.ContextService import ContextService -from context.service._old_code.Populate import populate -from context.service.rest_server.RestServer import RestServer -from context.service.rest_server.Resources import RESOURCES -from requests import Session -from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker -from context.service.database.models._Base import Base - -from .Objects import ( - CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, - DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2, - LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, - SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID, - POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID) - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -LOCAL_HOST = '127.0.0.1' -GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports -HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT)) # avoid privileged ports - -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT) - -DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST -DEFAULT_REDIS_SERVICE_PORT = 6379 -DEFAULT_REDIS_DATABASE_ID = 0 - -REDIS_CONFIG = { - 'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST), - 'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT), - 'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID', DEFAULT_REDIS_DATABASE_ID ), -} - -SCENARIOS = [ - ('all_sqlalchemy', {}, MessageBrokerBackendEnum.INMEMORY, {} ), - ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) -# ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), -] - -@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) -def context_s_mb(request) -> Tuple[Session, MessageBroker]: - name,db_session,mb_backend,mb_settings = request.param - msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...' - LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings))) - - db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable' - LOGGER.debug('Connecting to DB: {}'.format(db_uri)) - - try: - engine = create_engine(db_uri) - except Exception as e: - LOGGER.error("Failed to connect to database.") - LOGGER.error(f"{e}") - return 1 - - Base.metadata.create_all(engine) - _session = sessionmaker(bind=engine, expire_on_commit=False) - - _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings)) - yield _session, _message_broker - _message_broker.terminate() - -@pytest.fixture(scope='session') -def context_service_grpc(context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - _service = ContextService(context_s_mb[0], context_s_mb[1]) - _service.start() - yield _service - _service.stop() -@pytest.fixture(scope='session') -def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - database = context_db_mb[0] - _rest_server = RestServer() - for endpoint_name, resource_class, resource_url in RESOURCES: - _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,)) - _rest_server.start() - time.sleep(1) # bring time for the server to start - yield _rest_server - _rest_server.shutdown() - _rest_server.join() -@pytest.fixture(scope='session') -def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name - _client = ContextClient() - yield _client - _client.close() -""" -def do_rest_request(url : str): - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url) - LOGGER.warning('Request: GET {:s}'.format(str(request_url))) - reply = requests.get(request_url) - LOGGER.warning('Reply: {:s}'.format(str(reply.text))) - assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code) - return reply.json() -""" - -"""# ----- Test gRPC methods ---------------------------------------------------------------------------------------------- -def test_grpc_context( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_s_mb : Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_s_mb[0] - - database = Database(Session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Context({:s}) not found'.format(DEFAULT_CONTEXT_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListContextIds(Empty()) - assert len(response.context_ids) == 0 - - response = context_client_grpc.ListContexts(Empty()) - assert len(response.contexts) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - wrong_uuid = 'c97c4185-e1d1-4ea7-b6b9-afbf76cb61f4' - with pytest.raises(grpc.RpcError) as e: - WRONG_TOPOLOGY_ID = copy.deepcopy(TOPOLOGY_ID) - WRONG_TOPOLOGY_ID['context_id']['context_uuid']['uuid'] = wrong_uuid - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['topology_ids'].append(WRONG_TOPOLOGY_ID) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - with pytest.raises(grpc.RpcError) as e: - WRONG_SERVICE_ID = copy.deepcopy(SERVICE_R1_R2_ID) - WRONG_SERVICE_ID['context_id']['context_uuid']['uuid'] = wrong_uuid - WRONG_CONTEXT = copy.deepcopy(CONTEXT) - WRONG_CONTEXT['service_ids'].append(WRONG_SERVICE_ID) - context_client_grpc.SetContext(Context(**WRONG_CONTEXT)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\ - 'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 1 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert len(response.topology_ids) == 0 - assert len(response.service_ids) == 0 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListContextIds(Empty()) - assert len(response.context_ids) == 1 - assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.ListContexts(Empty()) - assert len(response.contexts) == 1 - assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert len(response.contexts[0].topology_ids) == 0 - assert len(response.contexts[0].service_ids) == 0 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, ContextEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - -def test_grpc_topology( - context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_s_mb[0] - - database = Database(session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # event = events_collector.get_event(block=True) - # assert isinstance(event, ContextEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - # assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) - assert e.value.details() == 'Topology({:s}) not found'.format(DEFAULT_TOPOLOGY_UUID) - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) - assert len(response.topology_ids) == 0 - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 1 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT) - CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=2) - - # assert isinstance(events[0], TopologyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # assert isinstance(events[1], ContextEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert len(response.device_ids) == 0 - assert len(response.link_ids) == 0 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID)) - assert len(response.topology_ids) == 1 - assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == 1 - assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert len(response.topologies[0].device_ids) == 0 - assert len(response.topologies[0].link_ids) == 0 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=2) - - # assert isinstance(events[0], TopologyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # assert isinstance(events[1], ContextEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - -def test_grpc_device( - context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_s_mb[0] - - database = Database(session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - events = events_collector.get_events(block=True, count=2) - - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Device({:s}) not found'.format(DEVICE_R1_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListDeviceIds(Empty()) - assert len(response.device_ids) == 0 - - response = context_client_grpc.ListDevices(Empty()) - assert len(response.devices) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 - - # ----- Create the object ------------------------------------------------------------------------------------------ - with pytest.raises(grpc.RpcError) as e: - WRONG_DEVICE = copy.deepcopy(DEVICE_R1) - WRONG_DEVICE_UUID = '3f03c76d-31fb-47f5-9c1d-bc6b6bfa2d08' - WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID - context_client_grpc.SetDevice(Device(**WRONG_DEVICE)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\ - 'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID) - assert e.value.details() == msg - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, DeviceEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, DeviceEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 47 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID)) - assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.device_type == 'packet-router' - assert len(response.device_config.config_rules) == 3 - assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - assert len(response.device_drivers) == 1 - assert len(response.device_endpoints) == 3 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListDeviceIds(Empty()) - assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.ListDevices(Empty()) - assert len(response.devices) == 1 - assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID - assert response.devices[0].device_type == 'packet-router' - assert len(response.devices[0].device_config.config_rules) == 3 - assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - assert len(response.devices[0].device_drivers) == 1 - assert len(response.devices[0].device_endpoints) == 3 - - # ----- Create object relation ------------------------------------------------------------------------------------- - TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID) - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check relation was created --------------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert len(response.device_ids) == 1 - assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - assert len(response.link_ids) == 0 - - # ----- Dump state of database after creating the object relation -------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 47 - - # ----- Remove the object -------------------------------ro----------------------------------------------------------- - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=3) - - # assert isinstance(events[0], DeviceEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID - - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # assert isinstance(events[2], ContextEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - -def test_grpc_link( - context_client_grpc: ContextClient, # pylint: disable=redefined-outer-name - context_s_mb: Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name - session = context_s_mb[0] - - database = Database(session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID - # events = events_collector.get_events(block=True, count=4) - - # assert isinstance(events[0], ContextEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - # - # assert isinstance(events[3], DeviceEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Link({:s}) not found'.format(LINK_R1_R2_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListLinkIds(Empty()) - assert len(response.link_ids) == 0 - - response = context_client_grpc.ListLinks(Empty()) - assert len(response.links) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 80 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) - assert response.link_uuid.uuid == LINK_R1_R2_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, LinkEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetLink(Link(**LINK_R1_R2)) - assert response.link_uuid.uuid == LINK_R1_R2_UUID - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, LinkEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 88 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID)) - assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID - assert len(response.link_endpoint_ids) == 2 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListLinkIds(Empty()) - assert len(response.link_ids) == 1 - assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID - - response = context_client_grpc.ListLinks(Empty()) - assert len(response.links) == 1 - assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID - - assert len(response.links[0].link_endpoint_ids) == 2 - - # ----- Create object relation ------------------------------------------------------------------------------------- - TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY) - TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID) - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_LINK)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - # event = events_collector.get_event(block=True) - # assert isinstance(event, TopologyEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - # ----- Check relation was created --------------------------------------------------------------------------------- - response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID)) - assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - assert len(response.device_ids) == 2 - # assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID - # assert response.device_ids[1].device_uuid.uuid == DEVICE_R2_UUID - assert len(response.link_ids) == 1 - assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID - - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 88 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=5) - # - # assert isinstance(events[0], LinkEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID - # - # assert isinstance(events[1], DeviceEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID - # - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID - # - # assert isinstance(events[3], TopologyEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # - # assert isinstance(events[4], ContextEvent) - # assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 -""" - -def test_grpc_service( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_s_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_s_mb[0] - # ----- Clean the database ----------------------------------------------------------------------------------------- - database = Database(Session) - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID - # events = events_collector.get_events(block=True, count=4) - # - # assert isinstance(events[0], ContextEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - # - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - # - # assert isinstance(events[3], DeviceEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - LOGGER.info('----------------') - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID) - LOGGER.info('----------------') - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) - assert len(response.service_ids) == 0 - LOGGER.info('----------------') - - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - LOGGER.info('----------------') - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = database.dump_all() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(db_entry) - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 80 - - # ----- Create the object ------------------------------------------------------------------------------------------ - with pytest.raises(grpc.RpcError) as e: - WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2) - WRONG_SERVICE['service_endpoint_ids'][0]\ - ['topology_id']['context_id']['context_uuid']['uuid'] = 'ca1ea172-728f-441d-972c-feeae8c9bffc' - context_client_grpc.SetService(Service(**WRONG_SERVICE)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT - msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(ca1ea172-728f-441d-972c-feeae8c9bffc) is invalid; '\ - 'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID) - assert e.value.details() == msg - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=2) - - assert isinstance(events[0], ServiceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[1], ContextEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ServiceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 108 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID)) - assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM - assert len(response.service_endpoint_ids) == 2 - assert len(response.service_constraints) == 2 - assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED - assert len(response.service_config.config_rules) == 3 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID)) - assert len(response.service_ids) == 1 - assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID - - response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 1 - assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM - assert len(response.services[0].service_endpoint_ids) == 2 - assert len(response.services[0].service_constraints) == 2 - assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED - assert len(response.services[0].service_config.config_rules) == 3 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=5) - - assert isinstance(events[0], ServiceEvent) - assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[1], DeviceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[3], TopologyEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[4], ContextEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - -""" - -def test_grpc_connection( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_s_mb[0] - - database = Database(Session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R3)) - assert response.device_uuid.uuid == DEVICE_R3_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R2_R3)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R2_R3_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R2_R3_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R3)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R3_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R3_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - events = events_collector.get_events(block=True, count=11) - - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[4].device_id.device_uuid.uuid == DEVICE_R3_UUID - - assert isinstance(events[5], ServiceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[5].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[5].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[6], ContextEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[6].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[7], ServiceEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[7].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[7].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID - - assert isinstance(events[8], ContextEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[9], ServiceEvent) - assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[9].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[9].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - - assert isinstance(events[10], ContextEvent) - assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[10].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Connection({:s}) not found'.format(CONNECTION_R1_R3_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connection_ids) == 0 - - response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connections) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 187 - - # ----- Create the object ------------------------------------------------------------------------------------------ - with pytest.raises(grpc.RpcError) as e: - WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3) - WRONG_CONNECTION['path_hops_endpoint_ids'][0]\ - ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' - context_client_grpc.SetConnection(Connection(**WRONG_CONNECTION)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - # TODO: should we check that all endpoints belong to same topology? - # TODO: should we check that endpoints form links over the topology? - msg = 'EndPoint({:s}/{:s}:wrong-context-uuid/{:s}) not found'.format( - DEVICE_R1_UUID, WRONG_CONNECTION['path_hops_endpoint_ids'][0]['endpoint_uuid']['uuid'], DEFAULT_TOPOLOGY_UUID) - assert e.value.details() == msg - - response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) - assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) - assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 203 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - assert response.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - assert len(response.path_hops_endpoint_ids) == 6 - assert len(response.sub_service_ids) == 2 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connection_ids) == 1 - assert response.connection_ids[0].connection_uuid.uuid == CONNECTION_R1_R3_UUID - - response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connections) == 1 - assert response.connections[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - assert len(response.connections[0].path_hops_endpoint_ids) == 6 - assert len(response.connections[0].sub_service_ids) == 2 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R2_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R3_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=9) - - assert isinstance(events[0], ConnectionEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - assert isinstance(events[1], ServiceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - - assert isinstance(events[2], ServiceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[2].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID - - assert isinstance(events[3], ServiceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[3].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[5], DeviceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[5].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[6], DeviceEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[6].device_id.device_uuid.uuid == DEVICE_R3_UUID - - assert isinstance(events[7], TopologyEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[7].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[7].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[8], ContextEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - -def test_grpc_policy( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] - - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client_grpc) - #events_collector.start() - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - POLICY_ID = 'no-uuid' - DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}} - - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID)) - - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListPolicyRuleIds(Empty()) - assert len(response.policyRuleIdList) == 0 - - response = context_client_grpc.ListPolicyRules(Empty()) - assert len(response.policyRules) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) - assert response.uuid.uuid == POLICY_RULE_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=1) - # assert isinstance(events[0], PolicyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) - assert response.uuid.uuid == POLICY_RULE_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID)) - assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListPolicyRuleIds(Empty()) - assert len(response.policyRuleIdList) == 1 - assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID - - response = context_client_grpc.ListPolicyRules(Empty()) - assert len(response.policyRules) == 1 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=2) - - # assert isinstance(events[0], PolicyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID - - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - - -# ----- Test REST API methods ------------------------------------------------------------------------------------------ - -def test_rest_populate_database( - context_db_mb : Tuple[Database, MessageBroker], # pylint: disable=redefined-outer-name - context_service_grpc : ContextService # pylint: disable=redefined-outer-name - ): - database = context_db_mb[0] - database.clear_all() - populate(LOCAL_HOST, GRPC_PORT) - -def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/context_ids') - validate_context_ids(reply) - -def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/contexts') - validate_contexts(reply) - -def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}'.format(context_uuid)) - validate_context(reply) - -def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid)) - validate_topology_ids(reply) - -def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid)) - validate_topologies(reply) - -def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID) - reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid)) - validate_topology(reply, num_devices=3, num_links=3) - -def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid)) - validate_service_ids(reply) - -def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/services'.format(context_uuid)) - validate_services(reply) - -def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='') - reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) - validate_service(reply) - -def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid)) - #validate_slice_ids(reply) - -def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - reply = do_rest_request('/context/{:s}/slices'.format(context_uuid)) - #validate_slices(reply) - -#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -# context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) -# slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='') -# reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid)) -# #validate_slice(reply) - -def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/device_ids') - validate_device_ids(reply) - -def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/devices') - validate_devices(reply) - -def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='') - reply = do_rest_request('/device/{:s}'.format(device_uuid)) - validate_device(reply) - -def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/link_ids') - validate_link_ids(reply) - -def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/links') - validate_links(reply) - -def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='') - reply = do_rest_request('/link/{:s}'.format(link_uuid)) - validate_link(reply) - -def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') - reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid)) - validate_connection_ids(reply) - -def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') - reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid)) - validate_connections(reply) - -def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='') - reply = do_rest_request('/connection/{:s}'.format(connection_uuid)) - validate_connection(reply) - -def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/policyrule_ids') - #validate_policyrule_ids(reply) - -def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - reply = do_rest_request('/policyrules') - #validate_policyrules(reply) - -#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name -# policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='') -# reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid)) -# #validate_policyrule(reply) - - -# ----- Test misc. Context internal tools ------------------------------------------------------------------------------ - -def test_tools_fast_string_hasher(): - with pytest.raises(TypeError) as e: - fast_hasher(27) - assert str(e.value) == "data(27) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'int'>" - - with pytest.raises(TypeError) as e: - fast_hasher({27}) - assert str(e.value) == "data({27}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" - - with pytest.raises(TypeError) as e: - fast_hasher({'27'}) - assert str(e.value) == "data({'27'}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>" - - with pytest.raises(TypeError) as e: - fast_hasher([27]) - assert str(e.value) == "data[0](27) must be " + FASTHASHER_ITEM_ACCEPTED_FORMAT + ", found <class 'int'>" - - fast_hasher('hello-world') - fast_hasher('hello-world'.encode('UTF-8')) - fast_hasher(['hello', 'world']) - fast_hasher(('hello', 'world')) - fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')]) - fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8'))) -""" \ No newline at end of file diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py index 85a06d65e..e136a4f83 100644 --- a/src/context/service/database/Context.py +++ b/src/context/service/database/Context.py @@ -43,8 +43,7 @@ def context_list_objs(db_engine : Engine) -> ContextList: def context_get(db_engine : Engine, request : ContextId) -> Context: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: - obj : Optional[ContextModel] = session.query(ContextModel)\ - .filter_by(context_uuid=context_uuid).one_or_none() + obj : Optional[ContextModel] = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) if obj is None: diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index 7607a2349..8899b5a12 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -16,7 +16,7 @@ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from typing import Dict, List, Optional, Set +from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import Device, DeviceId, DeviceIdList, DeviceList from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Device import json_device_id @@ -47,8 +47,7 @@ def device_list_objs(db_engine : Engine) -> DeviceList: def device_get(db_engine : Engine, request : DeviceId) -> Device: device_uuid = device_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: - obj : Optional[DeviceModel] = session.query(DeviceModel)\ - .filter_by(device_uuid=device_uuid).one_or_none() + obj : Optional[DeviceModel] = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) if obj is None: @@ -58,7 +57,7 @@ def device_get(db_engine : Engine, request : DeviceId) -> Device: ]) return Device(**obj) -def device_set(db_engine : Engine, request : Device) -> bool: +def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: raw_device_uuid = request.device_id.device_uuid.uuid raw_device_name = request.name device_name = raw_device_uuid if len(raw_device_name) == 0 else raw_device_name diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index 9f11cad23..7032a2138 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -42,8 +42,7 @@ def link_list_objs(db_engine : Engine) -> LinkList: def link_get(db_engine : Engine, request : LinkId) -> Link: link_uuid = link_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: - obj : Optional[LinkModel] = session.query(LinkModel)\ - .filter_by(link_uuid=link_uuid).one_or_none() + obj : Optional[LinkModel] = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) if obj is None: @@ -53,7 +52,7 @@ def link_get(db_engine : Engine, request : LinkId) -> Link: ]) return Link(**obj) -def link_set(db_engine : Engine, request : Link) -> bool: +def link_set(db_engine : Engine, request : Link) -> Tuple[LinkId, bool]: raw_link_uuid = request.link_id.link_uuid.uuid raw_link_name = request.name link_name = raw_link_uuid if len(raw_link_name) == 0 else raw_link_name diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index 7e3d9d044..0230bc4d5 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -16,7 +16,7 @@ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceIdList, ServiceList from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Context import json_context_id @@ -50,8 +50,7 @@ def service_list_objs(db_engine : Engine, request : ContextId) -> ServiceList: def service_get(db_engine : Engine, request : ServiceId) -> Service: _,service_uuid = service_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: - obj : Optional[ServiceModel] = session.query(ServiceModel)\ - .filter_by(service_uuid=service_uuid).one_or_none() + obj : Optional[ServiceModel] = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none() return None if obj is None else obj.dump() obj = run_transaction(sessionmaker(bind=db_engine), callback) if obj is None: @@ -63,7 +62,7 @@ def service_get(db_engine : Engine, request : ServiceId) -> Service: ]) return Service(**obj) -def service_set(db_engine : Engine, request : Service) -> bool: +def service_set(db_engine : Engine, request : Service) -> Tuple[ServiceId, bool]: raw_context_uuid = request.service_id.context_id.context_uuid.uuid raw_service_uuid = request.service_id.service_uuid.uuid raw_service_name = request.name diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py new file mode 100644 index 000000000..318923555 --- /dev/null +++ b/src/context/service/database/Slice.py @@ -0,0 +1,216 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import and_, delete +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional, Set, Tuple +from common.proto.context_pb2 import ContextId, Slice, SliceId, SliceIdList, SliceList +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Slice import json_slice_id +from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules +from context.service.database.Constraint import compose_constraints_data, upsert_constraints +from .models.enums.SliceStatus import grpc_to_enum__slice_status +from .models.RelationModels import SliceEndPointModel, SliceServiceModel #, SliceSubSliceModel +from .models.SliceModel import SliceModel +from .uuids.Context import context_get_uuid +from .uuids.EndPoint import endpoint_get_uuid +from .uuids.Service import service_get_uuid +from .uuids.Slice import slice_get_uuid + +def slice_list_ids(db_engine : Engine, request : ContextId) -> SliceIdList: + context_uuid = context_get_uuid(request, allow_random=False) + def callback(session : Session) -> List[Dict]: + obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.slice)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return SliceIdList(slice_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + +def slice_list_objs(db_engine : Engine, request : ContextId) -> SliceList: + context_uuid = context_get_uuid(request, allow_random=False) + def callback(session : Session) -> List[Dict]: + obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all() + #.options(selectinload(ContextModel.slice)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return SliceList(slices=run_transaction(sessionmaker(bind=db_engine), callback)) + +def slice_get(db_engine : Engine, request : SliceId) -> Slice: + _,slice_uuid = slice_get_uuid(request, allow_random=False) + def callback(session : Session) -> Optional[Dict]: + obj : Optional[SliceModel] = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: + context_uuid = context_get_uuid(request.context_id, allow_random=False) + raw_slice_uuid = '{:s}/{:s}'.format(request.context_id.context_uuid.uuid, request.slice_uuid.uuid) + raise NotFoundException('Slice', raw_slice_uuid, extra_details=[ + 'context_uuid generated was: {:s}'.format(context_uuid), + 'slice_uuid generated was: {:s}'.format(slice_uuid), + ]) + return Slice(**obj) + +def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: + raw_context_uuid = request.slice_id.context_id.context_uuid.uuid + raw_slice_uuid = request.slice_id.slice_uuid.uuid + raw_slice_name = request.name + slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name + context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=True) + + slice_status = grpc_to_enum__slice_status(request.slice_status.slice_status) + + slice_endpoints_data : List[Dict] = list() + for i,endpoint_id in enumerate(request.slice_endpoint_ids): + endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid + if endpoint_context_uuid not in {raw_context_uuid, context_uuid}: + raise InvalidArgumentException( + 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), + endpoint_context_uuid, + ['should be == request.slice_id.context_id.context_uuid.uuid({:s})'.format(raw_context_uuid)]) + + _, _, endpoint_uuid = endpoint_get_uuid(endpoint_id, allow_random=False) + slice_endpoints_data.append({ + 'slice_uuid' : slice_uuid, + 'endpoint_uuid': endpoint_uuid, + }) + + slice_services_data : List[Dict] = list() + for i,service_id in enumerate(request.slice_service_ids): + _, service_uuid = service_get_uuid(service_id, allow_random=False) + slice_services_data.append({ + 'slice_uuid' : slice_uuid, + 'service_uuid': service_uuid, + }) + + #slice_subslices_data : List[Dict] = list() + #for i,subslice_id in enumerate(request.slice_subslice_ids): + # _, subslice_uuid = slice_get_uuid(subslice_id, allow_random=False) + # slice_subslices_data.append({ + # 'slice_uuid' : slice_uuid, + # 'subslice_uuid': subslice_uuid, + # }) + + constraints = compose_constraints_data(request.slice_constraints, slice_uuid=slice_uuid) + config_rules = compose_config_rules_data(request.slice_config.config_rules, slice_uuid=slice_uuid) + + slice_data = [{ + 'context_uuid' : context_uuid, + 'slice_uuid' : slice_uuid, + 'slice_name' : slice_name, + 'slice_status' : slice_status, + 'slice_owner_uuid' : request.slice_owner.owner_uuid.uuid, + 'slice_owner_string': request.slice_owner.owner_string, + }] + + def callback(session : Session) -> None: + stmt = insert(SliceModel).values(slice_data) + stmt = stmt.on_conflict_do_update( + index_elements=[SliceModel.slice_uuid], + set_=dict( + slice_name = stmt.excluded.slice_name, + slice_status = stmt.excluded.slice_status, + slice_owner_uuid = stmt.excluded.slice_owner_uuid, + slice_owner_string = stmt.excluded.slice_owner_string, + ) + ) + session.execute(stmt) + + stmt = insert(SliceEndPointModel).values(slice_endpoints_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[SliceEndPointModel.slice_uuid, SliceEndPointModel.endpoint_uuid] + ) + session.execute(stmt) + + stmt = insert(SliceServiceModel).values(slice_services_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[SliceServiceModel.slice_uuid, SliceServiceModel.service_uuid] + ) + session.execute(stmt) + + #stmt = insert(SliceSubSliceModel).values(slice_subslices_data) + #stmt = stmt.on_conflict_do_nothing( + # index_elements=[SliceSubSliceModel.slice_uuid, SliceSubSliceModel.subslice_uuid] + #) + #session.execute(stmt) + + upsert_constraints(session, constraints, slice_uuid=slice_uuid) + upsert_config_rules(session, config_rules, slice_uuid=slice_uuid) + + run_transaction(sessionmaker(bind=db_engine), callback) + updated = False # TODO: improve and check if created/updated + return SliceId(**json_slice_id(slice_uuid, json_context_id(context_uuid))),updated + +def slice_unset(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: + raw_context_uuid = request.slice_id.context_id.context_uuid.uuid + raw_slice_uuid = request.slice_id.slice_uuid.uuid + raw_slice_name = request.name + slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name + context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=False) + + if len(request.slice_constraints) > 0: raise NotImplementedError('UnsetSlice: removal of constraints') + if len(request.slice_config.config_rules) > 0: raise NotImplementedError('UnsetSlice: removal of config rules') + if len(request.slice_endpoint_ids) > 0: raise NotImplementedError('UnsetSlice: removal of endpoints') + + slice_endpoint_uuids : Set[str] = set() + for i,endpoint_id in enumerate(request.slice_endpoint_ids): + endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid + if endpoint_context_uuid not in {raw_context_uuid, context_uuid}: + raise InvalidArgumentException( + 'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i), + endpoint_context_uuid, + ['should be == request.slice_id.context_id.context_uuid.uuid({:s})'.format(raw_context_uuid)]) + slice_endpoint_uuids.add(endpoint_get_uuid(endpoint_id, allow_random=False)[2]) + + slice_service_uuids : Set[str] = { + service_get_uuid(service_id, allow_random=False)[1] + for service_id in request.slice_service_ids + } + + slice_subslice_uuids : Set[str] = { + slice_get_uuid(subslice_id, allow_random=False)[1] + for subslice_id in request.slice_subslice_ids + } + + def callback(session : Session) -> bool: + num_deletes = 0 + num_deletes += session.query(SliceServiceModel)\ + .filter_by(and_( + SliceServiceModel.slice_uuid == slice_uuid, + SliceServiceModel.service_uuid.in_(slice_service_uuids) + )).delete() + #num_deletes += session.query(SliceSubSliceModel)\ + # .filter_by(and_( + # SliceSubSliceModel.slice_uuid == slice_uuid, + # SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) + # )).delete() + num_deletes += session.query(SliceEndPointModel)\ + .filter_by(and_( + SliceEndPointModel.slice_uuid == slice_uuid, + SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids) + )).delete() + return num_deletes > 0 + + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return SliceId(**json_slice_id(slice_uuid, json_context_id(context_uuid))),updated + +def slice_delete(db_engine : Engine, request : SliceId) -> bool: + _,slice_uuid = slice_get_uuid(request, allow_random=False) + def callback(session : Session) -> bool: + num_deleted = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).delete() + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py index ae8d0a8bd..a7272713c 100644 --- a/src/context/service/database/Topology.py +++ b/src/context/service/database/Topology.py @@ -17,7 +17,7 @@ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import ContextId, Topology, TopologyId, TopologyIdList, TopologyList from common.rpc_method_wrapper.ServiceExceptions import NotFoundException from common.tools.object_factory.Context import json_context_id @@ -60,7 +60,7 @@ def topology_get(db_engine : Engine, request : TopologyId) -> Topology: ]) return Topology(**obj) -def topology_set(db_engine : Engine, request : Topology) -> bool: +def topology_set(db_engine : Engine, request : Topology) -> Tuple[TopologyId, bool]: topology_name = request.name if len(topology_name) == 0: topology_name = request.topology_id.topology_uuid.uuid context_uuid,topology_uuid = topology_get_uuid(request.topology_id, topology_name=topology_name, allow_random=True) diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index 11e151ef6..0e4b94427 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -30,6 +30,7 @@ class ConfigRuleModel(_Base): configrule_uuid = Column(UUID(as_uuid=False), primary_key=True) device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE'), nullable=True) service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True) + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=True) position = Column(Integer, nullable=False) kind = Column(Enum(ConfigRuleKindEnum), nullable=False) action = Column(Enum(ORM_ConfigActionEnum), nullable=False) @@ -37,8 +38,9 @@ class ConfigRuleModel(_Base): __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), - #UniqueConstraint('device_uuid', 'position', name='unique_per_device'), + #UniqueConstraint('device_uuid', 'position', name='unique_per_device' ), #UniqueConstraint('service_uuid', 'position', name='unique_per_service'), + #UniqueConstraint('slice_uuid', 'position', name='unique_per_slice' ), ) def dump(self) -> Dict: diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py index 118ae9505..90adb9ce7 100644 --- a/src/context/service/database/models/ConstraintModel.py +++ b/src/context/service/database/models/ConstraintModel.py @@ -30,7 +30,8 @@ class ConstraintModel(_Base): __tablename__ = 'constraint' constraint_uuid = Column(UUID(as_uuid=False), primary_key=True) - service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=False) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True) + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE'), nullable=True) position = Column(Integer, nullable=False) kind = Column(Enum(ConstraintKindEnum), nullable=False) data = Column(String, nullable=False) @@ -38,6 +39,7 @@ class ConstraintModel(_Base): __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), #UniqueConstraint('service_uuid', 'position', name='unique_per_service'), + #UniqueConstraint('slice_uuid', 'position', name='unique_per_slice' ), ) def dump(self) -> Dict: diff --git a/src/context/service/database/models/ContextModel.py b/src/context/service/database/models/ContextModel.py index 1a282e8bd..ffeb10111 100644 --- a/src/context/service/database/models/ContextModel.py +++ b/src/context/service/database/models/ContextModel.py @@ -26,7 +26,7 @@ class ContextModel(_Base): topologies = relationship('TopologyModel', back_populates='context') services = relationship('ServiceModel', back_populates='context') - #slices = relationship('SliceModel', back_populates='context') + slices = relationship('SliceModel', back_populates='context') def dump_id(self) -> Dict: return {'context_uuid': {'uuid': self.context_uuid}} @@ -37,5 +37,5 @@ class ContextModel(_Base): 'name' : self.context_name, 'topology_ids': [obj.dump_id() for obj in self.topologies], 'service_ids' : [obj.dump_id() for obj in self.services ], - #'slice_ids' : [obj.dump_id() for obj in self.slices ], + 'slice_ids' : [obj.dump_id() for obj in self.slices ], } diff --git a/src/context/service/database/models/RelationModels.py b/src/context/service/database/models/RelationModels.py index a57d85eb3..468b14519 100644 --- a/src/context/service/database/models/RelationModels.py +++ b/src/context/service/database/models/RelationModels.py @@ -40,20 +40,32 @@ class ServiceEndPointModel(_Base): service = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined') endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints' -# class SliceEndPointModel(Model): -# pk = PrimaryKeyField() -# slice_fk = ForeignKeyField(SliceModel) -# endpoint_fk = ForeignKeyField(EndPointModel) +class SliceEndPointModel(_Base): + __tablename__ = 'slice_endpoint' -# class SliceServiceModel(Model): -# pk = PrimaryKeyField() -# slice_fk = ForeignKeyField(SliceModel) -# service_fk = ForeignKeyField(ServiceModel) + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) -# class SliceSubSliceModel(Model): -# pk = PrimaryKeyField() -# slice_fk = ForeignKeyField(SliceModel) -# sub_slice_fk = ForeignKeyField(SliceModel) + slice = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints' + +class SliceServiceModel(_Base): + __tablename__ = 'slice_service' + + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True) + + slice = relationship('SliceModel', back_populates='slice_services', lazy='joined') + service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services' + +#class SliceSubSliceModel(_Base): +# __tablename__ = 'slice_subslice' +# +# slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) +# subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True) +# +# slice = relationship('SliceModel', foreign_keys=[slice_uuid], lazy='joined') #back_populates='slice_subslices' +# subslice = relationship('SliceModel', foreign_keys=[subslice_uuid], lazy='joined') #back_populates='slice_subslices' class TopologyDeviceModel(_Base): __tablename__ = 'topology_device' diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py index 2b03e6122..ef2b64962 100644 --- a/src/context/service/database/models/SliceModel.py +++ b/src/context/service/database/models/SliceModel.py @@ -12,111 +12,64 @@ # See the License for the specific language governing permissions and # limitations under the License. -import functools, logging, operator -from enum import Enum -from typing import Dict, List -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from common.orm.HighLevel import get_related_objects -from common.proto.context_pb2 import SliceStatusEnum -from .ConfigRuleModel import ConfigModel -from .ConstraintModel import ConstraintsModel -from .models.ContextModel import ContextModel -from .Tools import grpc_to_enum - -LOGGER = logging.getLogger(__name__) - -class ORM_SliceStatusEnum(Enum): - UNDEFINED = SliceStatusEnum.SLICESTATUS_UNDEFINED - PLANNED = SliceStatusEnum.SLICESTATUS_PLANNED - INIT = SliceStatusEnum.SLICESTATUS_INIT - ACTIVE = SliceStatusEnum.SLICESTATUS_ACTIVE - DEINIT = SliceStatusEnum.SLICESTATUS_DEINIT - -grpc_to_enum__slice_status = functools.partial( - grpc_to_enum, SliceStatusEnum, ORM_SliceStatusEnum) - -class SliceModel(Model): - pk = PrimaryKeyField() - context_fk = ForeignKeyField(ContextModel) - slice_uuid = StringField(required=True, allow_empty=False) - slice_constraints_fk = ForeignKeyField(ConstraintsModel) - slice_status = EnumeratedField(ORM_SliceStatusEnum, required=True) - slice_config_fk = ForeignKeyField(ConfigModel) - slice_owner_uuid = StringField(required=False, allow_empty=True) - slice_owner_string = StringField(required=False, allow_empty=True) - - def delete(self) -> None: - # pylint: disable=import-outside-toplevel - from .RelationModels import SliceEndPointModel, SliceServiceModel, SliceSubSliceModel - - for db_slice_endpoint_pk,_ in self.references(SliceEndPointModel): - SliceEndPointModel(self.database, db_slice_endpoint_pk).delete() - - for db_slice_service_pk,_ in self.references(SliceServiceModel): - SliceServiceModel(self.database, db_slice_service_pk).delete() - - for db_slice_subslice_pk,_ in self.references(SliceSubSliceModel): - SliceSubSliceModel(self.database, db_slice_subslice_pk).delete() - - super().delete() - - ConfigModel(self.database, self.slice_config_fk).delete() - ConstraintsModel(self.database, self.slice_constraints_fk).delete() +import operator +from sqlalchemy import Column, Enum, ForeignKey, String +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship +from typing import Dict +from .enums.SliceStatus import ORM_SliceStatusEnum +from ._Base import _Base + +class SliceModel(_Base): + __tablename__ = 'slice' + + slice_uuid = Column(UUID(as_uuid=False), primary_key=True) + context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) + slice_name = Column(String, nullable=True) + slice_status = Column(Enum(ORM_SliceStatusEnum), nullable=False) + slice_owner_uuid = Column(String, nullable=True) + slice_owner_string = Column(String, nullable=True) + + context = relationship('ContextModel', back_populates='slices') + slice_endpoints = relationship('SliceEndPointModel') # lazy='joined', back_populates='slice' + slice_services = relationship('SliceServiceModel') # lazy='joined', back_populates='slice' + #slice_subslices = relationship('SliceSubSliceModel') # lazy='joined', back_populates='slice' + constraints = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='slice' + config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='slice' def dump_id(self) -> Dict: - context_id = ContextModel(self.database, self.context_fk).dump_id() return { - 'context_id': context_id, + 'context_id': self.context.dump_id(), 'slice_uuid': {'uuid': self.slice_uuid}, } - def dump_endpoint_ids(self) -> List[Dict]: - from .RelationModels import SliceEndPointModel # pylint: disable=import-outside-toplevel - db_endpoints = get_related_objects(self, SliceEndPointModel, 'endpoint_fk') - return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))] - - def dump_constraints(self) -> List[Dict]: - return ConstraintsModel(self.database, self.slice_constraints_fk).dump() - - def dump_config(self) -> Dict: - return ConfigModel(self.database, self.slice_config_fk).dump() - - def dump_service_ids(self) -> List[Dict]: - from .RelationModels import SliceServiceModel # pylint: disable=import-outside-toplevel - db_services = get_related_objects(self, SliceServiceModel, 'service_fk') - return [db_service.dump_id() for db_service in sorted(db_services, key=operator.attrgetter('pk'))] - - def dump_subslice_ids(self) -> List[Dict]: - from .RelationModels import SliceSubSliceModel # pylint: disable=import-outside-toplevel - db_subslices = get_related_objects(self, SliceSubSliceModel, 'sub_slice_fk') - return [ - db_subslice.dump_id() - for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk')) - if db_subslice.pk != self.pk # if I'm subslice of other slice, I will appear as subslice of myself - ] - - def dump( # pylint: disable=arguments-differ - self, include_endpoint_ids=True, include_constraints=True, include_config_rules=True, - include_service_ids=True, include_subslice_ids=True - ) -> Dict: - result = { - 'slice_id': self.dump_id(), - 'slice_status': {'slice_status': self.slice_status.value}, + def dump(self) -> Dict: + return { + 'slice_id' : self.dump_id(), + 'name' : self.slice_name, + 'slice_status' : {'slice_status': self.slice_status.value}, + 'slice_endpoint_ids': [ + slice_endpoint.endpoint.dump_id() + for slice_endpoint in self.slice_endpoints + ], + 'slice_constraints' : [ + constraint.dump() + for constraint in sorted(self.constraints, key=operator.attrgetter('position')) + ], + 'slice_config' : {'config_rules': [ + config_rule.dump() + for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) + ]}, + 'slice_service_ids': [ + slice_service.service.dump_id() + for slice_service in self.slice_services + ], + 'slice_subslice_ids': [ + #slice_subslice.subslice.dump_id() + #for slice_subslice in self.slice_subslices + ], + 'slice_owner': { + 'owner_uuid': {'uuid': self.slice_owner_uuid}, + 'owner_string': self.slice_owner_string + } } - if include_endpoint_ids: result['slice_endpoint_ids'] = self.dump_endpoint_ids() - if include_constraints: result['slice_constraints'] = self.dump_constraints() - if include_config_rules: result.setdefault('slice_config', {})['config_rules'] = self.dump_config() - if include_service_ids: result['slice_service_ids'] = self.dump_service_ids() - if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids() - - if len(self.slice_owner_uuid) > 0: - result.setdefault('slice_owner', {}).setdefault('owner_uuid', {})['uuid'] = self.slice_owner_uuid - - if len(self.slice_owner_string) > 0: - result.setdefault('slice_owner', {})['owner_string'] = self.slice_owner_string - - return result diff --git a/src/context/service/_old_code/RestServer.py b/src/context/service/database/models/enums/SliceStatus.py similarity index 54% rename from src/context/service/_old_code/RestServer.py rename to src/context/service/database/models/enums/SliceStatus.py index 289e92a3c..440f5ba2a 100644 --- a/src/context/service/_old_code/RestServer.py +++ b/src/context/service/database/models/enums/SliceStatus.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import ServiceNameEnum -from common.Settings import get_service_baseurl_http, get_service_port_http -from common.tools.service.GenericRestServer import GenericRestServer +import enum, functools +from common.proto.context_pb2 import SliceStatusEnum +from ._GrpcToEnum import grpc_to_enum -class RestServer(GenericRestServer): - def __init__(self, cls_name: str = __name__) -> None: - bind_port = get_service_port_http(ServiceNameEnum.CONTEXT) - base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT) - super().__init__(bind_port, base_url, cls_name=cls_name) +class ORM_SliceStatusEnum(enum.Enum): + UNDEFINED = SliceStatusEnum.SLICESTATUS_UNDEFINED + PLANNED = SliceStatusEnum.SLICESTATUS_PLANNED + INIT = SliceStatusEnum.SLICESTATUS_INIT + ACTIVE = SliceStatusEnum.SLICESTATUS_ACTIVE + DEINIT = SliceStatusEnum.SLICESTATUS_DEINIT + +grpc_to_enum__slice_status = functools.partial( + grpc_to_enum, SliceStatusEnum, ORM_SliceStatusEnum) diff --git a/src/context/service/database/uuids/Slice.py b/src/context/service/database/uuids/Slice.py new file mode 100644 index 000000000..3b46e582e --- /dev/null +++ b/src/context/service/database/uuids/Slice.py @@ -0,0 +1,37 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple +from common.proto.context_pb2 import SliceId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random +from .Context import context_get_uuid + +def slice_get_uuid( + slice_id : SliceId, slice_name : str = '', allow_random : bool = False +) -> Tuple[str, str]: + context_uuid = context_get_uuid(slice_id.context_id, allow_random=False) + raw_slice_uuid = slice_id.slice_uuid.uuid + + if len(raw_slice_uuid) > 0: + return context_uuid, get_uuid_from_string(raw_slice_uuid, prefix_for_name=context_uuid) + if len(slice_name) > 0: + return context_uuid, get_uuid_from_string(slice_name, prefix_for_name=context_uuid) + if allow_random: + return context_uuid, get_uuid_random() + + raise InvalidArgumentsException([ + ('slice_id.slice_uuid.uuid', raw_slice_uuid), + ('name', slice_name), + ], extra_details=['At least one is required to produce a Slice UUID']) diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index c350d4f20..93dd6f2c6 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.object_factory.ConfigRule import json_config_rule_set @@ -23,6 +23,7 @@ from common.tools.object_factory.Device import json_device_id, json_device_packe from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id from common.tools.object_factory.Link import json_link, json_link_id from common.tools.object_factory.Service import json_service_id, json_service_l3nm_planned +from common.tools.object_factory.Slice import json_slice_id, json_slice from common.tools.object_factory.Topology import json_topology, json_topology_id from common.tools.object_factory.PolicyRule import json_policy_rule, json_policy_rule_id @@ -116,6 +117,36 @@ SERVICE_R2_R3_NAME, SERVICE_R2_R3_ID, SERVICE_R2_R3 = compose_service( 'R2-R3', [(DEVICE_R2_ID, '2.3'), (DEVICE_R3_ID, '2.2')], 23.1, 3.4) +# ----- Slice ---------------------------------------------------------------------------------------------------------- +def compose_slice( + name : str, endpoint_ids : List[Tuple[str, str]], latency_ms : float, jitter_us : float, + service_ids : List[Dict] = [], subslice_ids : List[Dict] = [], owner : Optional[Dict] = None +) -> Tuple[str, Dict, Dict]: + slice_id = json_slice_id(name, context_id=CONTEXT_ID) + endpoint_ids = [ + json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID) + for device_id, endpoint_name in endpoint_ids + ] + constraints = [ + json_constraint_custom('latency[ms]', str(latency_ms)), + json_constraint_custom('jitter[us]', str(jitter_us)), + ] + config_rules = [ + json_config_rule_set('svc/rsrc1/value', 'value7'), + json_config_rule_set('svc/rsrc2/value', 'value8'), + json_config_rule_set('svc/rsrc3/value', 'value9'), + ] + slice_ = json_slice( + name, context_id=CONTEXT_ID, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules, + service_ids=service_ids, subslice_ids=subslice_ids, owner=owner) + return name, slice_id, slice_ + +SLICE_R1_R3_NAME, SLICE_R1_R3_ID, SLICE_R1_R3 = compose_slice( + 'R1-R3', [(DEVICE_R1_ID, '2.3'), (DEVICE_R3_ID, '2.1')], 15.2, 1.2, + service_ids=[SERVICE_R1_R2_ID, SERVICE_R2_R3_ID], + subslice_ids=[], owner=None) + + # ----- Connection ----------------------------------------------------------------------------------------------------- def compose_connection( name : str, service_id : Dict, endpoint_ids : List[Tuple[str, str]], sub_service_ids : List[Dict] = [] diff --git a/src/context/tests/__test_unitary.py b/src/context/tests/__test_unitary.py deleted file mode 100644 index e49fd2752..000000000 --- a/src/context/tests/__test_unitary.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#import pytest -#from context.client.ContextClient import ContextClient -#from .test_unitary_context import grpc_context -#from ._test_topology import grpc_topology -#from ._test_device import grpc_device -#from ._test_link import grpc_link -#from ._test_service import grpc_service -#from ._test_slice import grpc_slice -#from ._test_connection import grpc_connection -#from ._test_policy import grpc_policy - -#def test_grpc_context(context_client_grpc : ContextClient) -> None: -# grpc_context(context_client_grpc) - -#@pytest.mark.depends(on=['test_grpc_context']) -#def test_grpc_topology(context_client_grpc : ContextClient) -> None: -# grpc_topology(context_client_grpc) - -#@pytest.mark.depends(on=['test_grpc_topology']) -#def test_grpc_device(context_client_grpc : ContextClient) -> None: -# grpc_device(context_client_grpc) - -#@pytest.mark.depends(on=['test_grpc_device']) -#def test_grpc_link(context_client_grpc : ContextClient) -> None: -# grpc_link(context_client_grpc) - -#@pytest.mark.depends(on=['test_grpc_link']) -#def test_grpc_service(context_client_grpc : ContextClient) -> None: -# grpc_service(context_client_grpc) - -#@pytest.mark.depends(on=['test_grpc_service']) -#def test_grpc_slice(context_client_grpc : ContextClient) -> None: -# grpc_slice(context_client_grpc) - -#@pytest.mark.depends(on=['test_grpc_slice']) -#def test_grpc_connection(context_client_grpc : ContextClient) -> None: -# grpc_connection(context_client_grpc) - -#@pytest.mark.depends(on=['test_grpc_connection']) -#def test_grpc_policy(context_client_grpc : ContextClient) -> None: -# grpc_policy(context_client_grpc) diff --git a/src/context/tests/_test_slice.py b/src/context/tests/_test_slice.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py index 8bf4156c5..f5ef4efca 100644 --- a/src/context/tests/conftest.py +++ b/src/context/tests/conftest.py @@ -16,7 +16,7 @@ import json, os, pytest, sqlalchemy from _pytest.config import Config from _pytest.terminal import TerminalReporter from prettytable import PrettyTable -from typing import Any, Dict, List, Tuple +from typing import Any, Dict, List, Set, Tuple from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, @@ -71,7 +71,8 @@ def pytest_terminal_summary( ): yield - method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]]= dict() + method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]] = dict() + bucket_bounds : Set[str] = set() for raw_metric_name,raw_metric_data in RAW_METRICS.items(): if '_COUNTER_' in raw_metric_name: method_name,metric_name = raw_metric_name.split('_COUNTER_') @@ -81,6 +82,7 @@ def pytest_terminal_summary( raise Exception('Unsupported metric: {:s}'.format(raw_metric_name)) # pragma: no cover metric_data = method_to_metric_fields.setdefault(method_name, dict()).setdefault(metric_name, dict()) for field_name,labels,value,_,_ in raw_metric_data._child_samples(): + if field_name == '_bucket': bucket_bounds.add(labels['le']) if len(labels) > 0: field_name = '{:s}:{:s}'.format(field_name, json.dumps(labels, sort_keys=True)) metric_data[field_name] = value #print('method_to_metric_fields', method_to_metric_fields) @@ -90,10 +92,14 @@ def pytest_terminal_summary( if str_duration == '---': return 0.0 return float(str_duration.replace(' ms', '')) - field_names = ['Method', 'Started', 'Completed', 'Failed', 'avg(Duration)'] - pt_stats = PrettyTable(field_names=field_names, sortby='avg(Duration)', sort_key=sort_stats_key, reversesort=True) + field_names = ['Method', 'TOT', 'OK', 'ERR', 'avg(Dur)'] + bucket_bounds = sorted(bucket_bounds, key=lambda b: float(b)) + bucket_column_names = ['<={:s}'.format(bucket_bound) for bucket_bound in bucket_bounds] + field_names.extend(bucket_column_names) + + pt_stats = PrettyTable(field_names=field_names, sortby='avg(Dur)', sort_key=sort_stats_key, reversesort=True) + for f in field_names: pt_stats.align[f] = 'r' for f in ['Method']: pt_stats.align[f] = 'l' - for f in ['Started', 'Completed', 'Failed', 'avg(Duration)']: pt_stats.align[f] = 'r' for method_name,metrics in method_to_metric_fields.items(): counter_started_value = int(metrics['STARTED']['_total']) @@ -105,10 +111,29 @@ def pytest_terminal_summary( duration_count_value = float(metrics['DURATION']['_count']) duration_sum_value = float(metrics['DURATION']['_sum']) duration_avg_value = duration_sum_value/duration_count_value - pt_stats.add_row([ + + row = [ method_name, str(counter_started_value), str(counter_completed_value), str(counter_failed_value), '{:.3f} ms'.format(1000.0 * duration_avg_value), - ]) + ] + + total_count = 0 + for bucket_bound in bucket_bounds: + labels = json.dumps({"le": bucket_bound}, sort_keys=True) + bucket_name = '_bucket:{:s}'.format(labels) + accumulated_count = int(metrics['DURATION'][bucket_name]) + bucket_count = accumulated_count - total_count + row.append(str(bucket_count) if bucket_count > 0 else '') + total_count = accumulated_count + + pt_stats.add_row(row) + + for bucket_column_name in bucket_column_names: + col_index = pt_stats._field_names.index(bucket_column_name) + num_non_empties = sum([1 for row in pt_stats._rows if len(row[col_index]) > 0]) + if num_non_empties > 0: continue + pt_stats.del_column(bucket_column_name) + print('') print('Performance Results:') print(pt_stats.get_string()) diff --git a/src/context/tests/test_slice.py b/src/context/tests/test_slice.py new file mode 100644 index 000000000..9d27523b1 --- /dev/null +++ b/src/context/tests/test_slice.py @@ -0,0 +1,272 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from common.proto.context_pb2 import ( + Context, ContextId, Device, DeviceId, Link, LinkId, Service, ServiceId, Slice, SliceId, SliceStatusEnum, Topology, + TopologyId) +from context.client.ContextClient import ContextClient +from context.service.database.uuids.Slice import slice_get_uuid +#from context.client.EventsCollector import EventsCollector +from .Objects import ( + CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, + LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, + SERVICE_R2_R3, SERVICE_R2_R3_ID, SLICE_R1_R3, SLICE_R1_R3_ID, SLICE_R1_R3_NAME, TOPOLOGY, TOPOLOGY_ID) + +@pytest.mark.depends(on=['context/tests/test_service.py::test_service']) +def test_slice(context_client : ContextClient) -> None: + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = True, + # activate_connection_collector = False) + #events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + context_client.SetContext(Context(**CONTEXT)) + context_client.SetTopology(Topology(**TOPOLOGY)) + context_client.SetDevice(Device(**DEVICE_R1)) + context_client.SetDevice(Device(**DEVICE_R2)) + context_client.SetDevice(Device(**DEVICE_R3)) + context_client.SetLink(Link(**LINK_R1_R2)) + context_client.SetLink(Link(**LINK_R1_R3)) + context_client.SetLink(Link(**LINK_R2_R3)) + context_client.SetService(Service(**SERVICE_R1_R2)) + context_client.SetService(Service(**SERVICE_R2_R3)) + + #events = events_collector.get_events(block=True, count=10) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[2], DeviceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[2].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[3].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[4], DeviceEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[4].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[5], LinkEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[5].link_id.link_uuid.uuid == link_r1_r2_uuid + #assert isinstance(events[6], LinkEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[6].link_id.link_uuid.uuid == link_r1_r3_uuid + #assert isinstance(events[7], LinkEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[7].link_id.link_uuid.uuid == link_r2_r3_uuid + #assert isinstance(events[8], ServiceEvent) + #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[8].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[9], ServiceEvent) + #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[9].service_id.service_uuid.uuid == service_r2_r3_uuid + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + slice_id = SliceId(**SLICE_R1_R3_ID) + context_uuid,slice_uuid = slice_get_uuid(slice_id, allow_random=False) + with pytest.raises(grpc.RpcError) as e: + context_client.GetSlice(slice_id) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + MSG = 'Slice({:s}/{:s}) not found; context_uuid generated was: {:s}; slice_uuid generated was: {:s}' + assert e.value.details() == MSG.format(CONTEXT_NAME, SLICE_R1_R3_NAME, context_uuid, slice_uuid) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert len(response.service_ids) == 2 + assert len(response.slice_ids) == 0 + + response = context_client.ListSliceIds(ContextId(**CONTEXT_ID)) + assert len(response.slice_ids) == 0 + + response = context_client.ListSlices(ContextId(**CONTEXT_ID)) + assert len(response.slices) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_UUID = 'ffffffff-ffff-ffff-ffff-ffffffffffff' + WRONG_SLICE = copy.deepcopy(SLICE_R1_R3) + WRONG_SLICE['slice_endpoint_ids'][0]['topology_id']['context_id']['context_uuid']['uuid'] = WRONG_UUID + context_client.SetSlice(Slice(**WRONG_SLICE)) + assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + MSG = 'request.slice_endpoint_ids[0].topology_id.context_id.context_uuid.uuid({}) is invalid; '\ + 'should be == request.slice_id.context_id.context_uuid.uuid({})' + raw_context_uuid = slice_id.context_id.context_uuid.uuid # pylint: disable=no-member + assert e.value.details() == MSG.format(WRONG_UUID, raw_context_uuid) + + response = context_client.SetSlice(Slice(**SLICE_R1_R3)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.slice_uuid.uuid == slice_uuid + + # ----- Check create event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, SliceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.slice_id.context_id.context_uuid.uuid == context_uuid + #assert event.slice_id.slice_uuid.uuid == slice_uuid + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.name == CONTEXT_NAME + assert len(response.topology_ids) == 1 + assert len(response.service_ids) == 2 + assert len(response.slice_ids) == 1 + assert response.slice_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.slice_ids[0].slice_uuid.uuid == slice_uuid + + response = context_client.GetSlice(SliceId(**SLICE_R1_R3_ID)) + assert response.slice_id.context_id.context_uuid.uuid == context_uuid + assert response.slice_id.slice_uuid.uuid == slice_uuid + assert response.name == SLICE_R1_R3_NAME + assert len(response.slice_endpoint_ids) == 2 + assert len(response.slice_constraints) == 2 + assert response.slice_status.slice_status == SliceStatusEnum.SLICESTATUS_PLANNED + assert len(response.slice_config.config_rules) == 3 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client.ListSliceIds(ContextId(**CONTEXT_ID)) + assert len(response.slice_ids) == 1 + assert response.slice_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.slice_ids[0].slice_uuid.uuid == slice_uuid + + response = context_client.ListSlices(ContextId(**CONTEXT_ID)) + assert len(response.slices) == 1 + assert response.slices[0].slice_id.context_id.context_uuid.uuid == context_uuid + assert response.slices[0].slice_id.slice_uuid.uuid == slice_uuid + assert response.slices[0].name == SLICE_R1_R3_NAME + assert len(response.slices[0].slice_endpoint_ids) == 2 + assert len(response.slices[0].slice_constraints) == 2 + assert response.slices[0].slice_status.slice_status == SliceStatusEnum.SLICESTATUS_PLANNED + assert len(response.slices[0].slice_config.config_rules) == 3 + + # ----- Update the object ------------------------------------------------------------------------------------------ + new_slice_name = 'new' + SLICE_UPDATED = copy.deepcopy(SLICE_R1_R3) + SLICE_UPDATED['name'] = new_slice_name + SLICE_UPDATED['slice_status']['slice_status'] = SliceStatusEnum.SLICESTATUS_ACTIVE + response = context_client.SetSlice(Slice(**SLICE_UPDATED)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.slice_uuid.uuid == slice_uuid + + # ----- Check update event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, SliceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.slice_id.context_id.context_uuid.uuid == context_uuid + #assert event.slice_id.slice_uuid.uuid == slice_uuid + + # ----- Get when the object is modified ---------------------------------------------------------------------------- + response = context_client.GetSlice(SliceId(**SLICE_R1_R3_ID)) + assert response.slice_id.context_id.context_uuid.uuid == context_uuid + assert response.slice_id.slice_uuid.uuid == slice_uuid + assert response.name == new_slice_name + assert len(response.slice_endpoint_ids) == 2 + assert len(response.slice_constraints) == 2 + assert response.slice_status.slice_status == SliceStatusEnum.SLICESTATUS_ACTIVE + assert len(response.slice_config.config_rules) == 3 + + # ----- List when the object is modified --------------------------------------------------------------------------- + response = context_client.ListSliceIds(ContextId(**CONTEXT_ID)) + assert len(response.slice_ids) == 1 + assert response.slice_ids[0].context_id.context_uuid.uuid == context_uuid + assert response.slice_ids[0].slice_uuid.uuid == slice_uuid + + response = context_client.ListSlices(ContextId(**CONTEXT_ID)) + assert len(response.slices) == 1 + assert response.slices[0].slice_id.context_id.context_uuid.uuid == context_uuid + assert response.slices[0].slice_id.slice_uuid.uuid == slice_uuid + assert response.slices[0].name == new_slice_name + assert len(response.slices[0].slice_endpoint_ids) == 2 + assert len(response.slices[0].slice_constraints) == 2 + assert response.slices[0].slice_status.slice_status == SliceStatusEnum.SLICESTATUS_ACTIVE + assert len(response.slices[0].slice_config.config_rules) == 3 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client.RemoveSlice(SliceId(**SLICE_R1_R3_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, SliceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.slice_id.context_id.context_uuid.uuid == context_uuid + #assert event.slice_id.slice_uuid.uuid == slice_uuid + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client.GetContext(ContextId(**CONTEXT_ID)) + assert len(response.topology_ids) == 1 + assert len(response.service_ids) == 2 + assert len(response.slice_ids) == 0 + + response = context_client.ListSliceIds(ContextId(**CONTEXT_ID)) + assert len(response.slice_ids) == 0 + + response = context_client.ListSlices(ContextId(**CONTEXT_ID)) + assert len(response.slices) == 0 + + # ----- Clean dependencies used in the test and capture related events --------------------------------------------- + context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) + context_client.RemoveService(ServiceId(**SERVICE_R2_R3_ID)) + context_client.RemoveLink(LinkId(**LINK_R1_R2_ID)) + context_client.RemoveLink(LinkId(**LINK_R1_R3_ID)) + context_client.RemoveLink(LinkId(**LINK_R2_R3_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R3_ID)) + context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client.RemoveContext(ContextId(**CONTEXT_ID)) + + #events = events_collector.get_events(block=True, count=10) + #assert isinstance(events[0], ServiceEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[1], ServiceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid + #assert isinstance(events[2], LinkEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].link_id.link_uuid.uuid == link_r1_r2_uuid + #assert isinstance(events[3], LinkEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].link_id.link_uuid.uuid == link_r1_r3_uuid + #assert isinstance(events[4], LinkEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[4].link_id.link_uuid.uuid == link_r2_r3_uuid + #assert isinstance(events[5], DeviceEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[5].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[6], DeviceEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[6].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[7], DeviceEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[7].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[8], TopologyEvent) + #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[8].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[8].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[9], ContextEvent) + #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[9].context_id.context_uuid.uuid == context_uuid + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + #events_collector.stop() diff --git a/test-context.sh b/test-context.sh index 47d81817b..a33b1e7dc 100755 --- a/test-context.sh +++ b/test-context.sh @@ -46,7 +46,8 @@ coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --ma context/tests/test_topology.py \ context/tests/test_device.py \ context/tests/test_link.py \ - context/tests/test_service.py + context/tests/test_service.py \ + context/tests/test_slice.py echo echo "Coverage report:" -- GitLab From d03919be0562757f11086a783bc506a4411ef91d Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 10 Jan 2023 16:31:45 +0000 Subject: [PATCH 028/158] Context component: - moved relational models to associated classes - migrated support for slice subslices - added filters to prevent upsert when there is nothing to update --- src/context/service/database/ConfigRule.py | 3 +- src/context/service/database/Constraint.py | 3 +- src/context/service/database/Device.py | 2 +- src/context/service/database/Link.py | 4 +- src/context/service/database/Service.py | 3 +- src/context/service/database/Slice.py | 60 ++++++------- .../database/models/ConnectionModel.py | 16 ++++ .../service/database/models/LinkModel.py | 11 ++- .../service/database/models/RelationModels.py | 86 ------------------- .../service/database/models/ServiceModel.py | 9 ++ .../service/database/models/SliceModel.py | 33 ++++++- .../service/database/models/TopologyModel.py | 18 ++++ 12 files changed, 121 insertions(+), 127 deletions(-) delete mode 100644 src/context/service/database/models/RelationModels.py diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py index af1dd1ec5..05dda20aa 100644 --- a/src/context/service/database/ConfigRule.py +++ b/src/context/service/database/ConfigRule.py @@ -52,7 +52,8 @@ def upsert_config_rules( if service_uuid is not None: stmt = stmt.where(ConfigRuleModel.service_uuid == service_uuid) if slice_uuid is not None: stmt = stmt.where(ConfigRuleModel.slice_uuid == slice_uuid ) session.execute(stmt) - session.execute(insert(ConfigRuleModel).values(config_rules)) + if len(config_rules) > 0: + session.execute(insert(ConfigRuleModel).values(config_rules)) #Union_SpecificConfigRule = Union[ diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py index 5c94d13c0..f79159a35 100644 --- a/src/context/service/database/Constraint.py +++ b/src/context/service/database/Constraint.py @@ -47,7 +47,8 @@ def upsert_constraints( if service_uuid is not None: stmt = stmt.where(ConstraintModel.service_uuid == service_uuid) if slice_uuid is not None: stmt = stmt.where(ConstraintModel.slice_uuid == slice_uuid ) session.execute(stmt) - session.execute(insert(ConstraintModel).values(constraints)) + if len(constraints) > 0: + session.execute(insert(ConstraintModel).values(constraints)) # def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int # ) -> Tuple[Union_ConstraintModel, bool]: diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index 8899b5a12..acb1603c6 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -23,7 +23,7 @@ from common.tools.object_factory.Device import json_device_id from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules from .models.DeviceModel import DeviceModel from .models.EndPointModel import EndPointModel -from .models.RelationModels import TopologyDeviceModel +from .models.TopologyModel import TopologyDeviceModel from .models.enums.DeviceDriver import grpc_to_enum__device_driver from .models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status from .models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index 7032a2138..a2b4e3035 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -20,8 +20,8 @@ from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import Link, LinkId, LinkIdList, LinkList from common.rpc_method_wrapper.ServiceExceptions import NotFoundException from common.tools.object_factory.Link import json_link_id -from .models.LinkModel import LinkModel -from .models.RelationModels import LinkEndPointModel, TopologyLinkModel +from .models.LinkModel import LinkModel, LinkEndPointModel +from .models.TopologyModel import TopologyLinkModel from .uuids.EndPoint import endpoint_get_uuid from .uuids.Link import link_get_uuid diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index 0230bc4d5..c926c2540 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -25,8 +25,7 @@ from context.service.database.ConfigRule import compose_config_rules_data, upser from context.service.database.Constraint import compose_constraints_data, upsert_constraints from .models.enums.ServiceStatus import grpc_to_enum__service_status from .models.enums.ServiceType import grpc_to_enum__service_type -from .models.RelationModels import ServiceEndPointModel -from .models.ServiceModel import ServiceModel +from .models.ServiceModel import ServiceModel, ServiceEndPointModel from .uuids.Context import context_get_uuid from .uuids.EndPoint import endpoint_get_uuid from .uuids.Service import service_get_uuid diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index 318923555..00b2fd24b 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -25,8 +25,7 @@ from common.tools.object_factory.Slice import json_slice_id from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules from context.service.database.Constraint import compose_constraints_data, upsert_constraints from .models.enums.SliceStatus import grpc_to_enum__slice_status -from .models.RelationModels import SliceEndPointModel, SliceServiceModel #, SliceSubSliceModel -from .models.SliceModel import SliceModel +from .models.SliceModel import SliceModel, SliceEndPointModel, SliceServiceModel, SliceSubSliceModel from .uuids.Context import context_get_uuid from .uuids.EndPoint import endpoint_get_uuid from .uuids.Service import service_get_uuid @@ -96,13 +95,13 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: 'service_uuid': service_uuid, }) - #slice_subslices_data : List[Dict] = list() - #for i,subslice_id in enumerate(request.slice_subslice_ids): - # _, subslice_uuid = slice_get_uuid(subslice_id, allow_random=False) - # slice_subslices_data.append({ - # 'slice_uuid' : slice_uuid, - # 'subslice_uuid': subslice_uuid, - # }) + slice_subslices_data : List[Dict] = list() + for i,subslice_id in enumerate(request.slice_subslice_ids): + _, subslice_uuid = slice_get_uuid(subslice_id, allow_random=False) + slice_subslices_data.append({ + 'slice_uuid' : slice_uuid, + 'subslice_uuid': subslice_uuid, + }) constraints = compose_constraints_data(request.slice_constraints, slice_uuid=slice_uuid) config_rules = compose_config_rules_data(request.slice_config.config_rules, slice_uuid=slice_uuid) @@ -129,23 +128,26 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: ) session.execute(stmt) - stmt = insert(SliceEndPointModel).values(slice_endpoints_data) - stmt = stmt.on_conflict_do_nothing( - index_elements=[SliceEndPointModel.slice_uuid, SliceEndPointModel.endpoint_uuid] - ) - session.execute(stmt) + if len(slice_endpoints_data) > 0: + stmt = insert(SliceEndPointModel).values(slice_endpoints_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[SliceEndPointModel.slice_uuid, SliceEndPointModel.endpoint_uuid] + ) + session.execute(stmt) - stmt = insert(SliceServiceModel).values(slice_services_data) - stmt = stmt.on_conflict_do_nothing( - index_elements=[SliceServiceModel.slice_uuid, SliceServiceModel.service_uuid] - ) - session.execute(stmt) + if len(slice_services_data) > 0: + stmt = insert(SliceServiceModel).values(slice_services_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[SliceServiceModel.slice_uuid, SliceServiceModel.service_uuid] + ) + session.execute(stmt) - #stmt = insert(SliceSubSliceModel).values(slice_subslices_data) - #stmt = stmt.on_conflict_do_nothing( - # index_elements=[SliceSubSliceModel.slice_uuid, SliceSubSliceModel.subslice_uuid] - #) - #session.execute(stmt) + if len(slice_subslices_data) > 0: + stmt = insert(SliceSubSliceModel).values(slice_subslices_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[SliceSubSliceModel.slice_uuid, SliceSubSliceModel.subslice_uuid] + ) + session.execute(stmt) upsert_constraints(session, constraints, slice_uuid=slice_uuid) upsert_config_rules(session, config_rules, slice_uuid=slice_uuid) @@ -193,11 +195,11 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: SliceServiceModel.slice_uuid == slice_uuid, SliceServiceModel.service_uuid.in_(slice_service_uuids) )).delete() - #num_deletes += session.query(SliceSubSliceModel)\ - # .filter_by(and_( - # SliceSubSliceModel.slice_uuid == slice_uuid, - # SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) - # )).delete() + num_deletes += session.query(SliceSubSliceModel)\ + .filter_by(and_( + SliceSubSliceModel.slice_uuid == slice_uuid, + SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) + )).delete() num_deletes += session.query(SliceEndPointModel)\ .filter_by(and_( SliceEndPointModel.slice_uuid == slice_uuid, diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py index 546fb7a80..19cafc59b 100644 --- a/src/context/service/database/models/ConnectionModel.py +++ b/src/context/service/database/models/ConnectionModel.py @@ -25,6 +25,11 @@ from common.proto.context_pb2 import EndPointId from .EndPointModel import EndPointModel from .ServiceModel import ServiceModel +from sqlalchemy import Column, ForeignKey #, ForeignKeyConstraint +#from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship +from ._Base import _Base + def remove_dict_key(dictionary : Dict, key : str): dictionary.pop(key, None) return dictionary @@ -111,6 +116,17 @@ class ConnectionModel(Model): if include_sub_service_ids: result['sub_service_ids'] = self.dump_sub_service_ids() return result + + + +# class ConnectionSubServiceModel(Model): +# pk = PrimaryKeyField() +# connection_fk = ForeignKeyField(ConnectionModel) +# sub_service_fk = ForeignKeyField(ServiceModel) + + + + def set_path_hop( database : Database, db_path : PathModel, position : int, db_endpoint : EndPointModel ) -> Tuple[PathHopModel, bool]: diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index fd4f80c16..950f48763 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Dict -from sqlalchemy import Column, String +from sqlalchemy import Column, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from ._Base import _Base @@ -39,3 +39,12 @@ class LinkModel(_Base): for link_endpoint in self.link_endpoints ], } + +class LinkEndPointModel(_Base): + __tablename__ = 'link_endpoint' + + link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + + link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints' diff --git a/src/context/service/database/models/RelationModels.py b/src/context/service/database/models/RelationModels.py deleted file mode 100644 index 468b14519..000000000 --- a/src/context/service/database/models/RelationModels.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sqlalchemy import Column, ForeignKey #, ForeignKeyConstraint -#from sqlalchemy.dialects.postgresql import UUID -from sqlalchemy.orm import relationship -from ._Base import _Base - -# class ConnectionSubServiceModel(Model): -# pk = PrimaryKeyField() -# connection_fk = ForeignKeyField(ConnectionModel) -# sub_service_fk = ForeignKeyField(ServiceModel) - -class LinkEndPointModel(_Base): - __tablename__ = 'link_endpoint' - - link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True) - endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) - - link = relationship('LinkModel', back_populates='link_endpoints', lazy='joined') - endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints' - -class ServiceEndPointModel(_Base): - __tablename__ = 'service_endpoint' - - service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE' ), primary_key=True) - endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) - - service = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined') - endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints' - -class SliceEndPointModel(_Base): - __tablename__ = 'slice_endpoint' - - slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) - endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) - - slice = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined') - endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints' - -class SliceServiceModel(_Base): - __tablename__ = 'slice_service' - - slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) - service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True) - - slice = relationship('SliceModel', back_populates='slice_services', lazy='joined') - service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services' - -#class SliceSubSliceModel(_Base): -# __tablename__ = 'slice_subslice' -# -# slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) -# subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True) -# -# slice = relationship('SliceModel', foreign_keys=[slice_uuid], lazy='joined') #back_populates='slice_subslices' -# subslice = relationship('SliceModel', foreign_keys=[subslice_uuid], lazy='joined') #back_populates='slice_subslices' - -class TopologyDeviceModel(_Base): - __tablename__ = 'topology_device' - - topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) - device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), primary_key=True) - - #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices' - device = relationship('DeviceModel', lazy='joined') # back_populates='topology_devices' - -class TopologyLinkModel(_Base): - __tablename__ = 'topology_link' - - topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) - link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True) - - #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links' - link = relationship('LinkModel', lazy='joined') # back_populates='topology_links' diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py index b08043844..e1e57f4c7 100644 --- a/src/context/service/database/models/ServiceModel.py +++ b/src/context/service/database/models/ServiceModel.py @@ -60,3 +60,12 @@ class ServiceModel(_Base): for config_rule in sorted(self.config_rules, key=operator.attrgetter('position')) ]}, } + +class ServiceEndPointModel(_Base): + __tablename__ = 'service_endpoint' + + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='CASCADE' ), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + + service = relationship('ServiceModel', back_populates='service_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints' diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py index ef2b64962..d3dff51e1 100644 --- a/src/context/service/database/models/SliceModel.py +++ b/src/context/service/database/models/SliceModel.py @@ -13,7 +13,7 @@ # limitations under the License. import operator -from sqlalchemy import Column, Enum, ForeignKey, String +from sqlalchemy import Column, Enum, ForeignKey, String, Table from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict @@ -33,7 +33,8 @@ class SliceModel(_Base): context = relationship('ContextModel', back_populates='slices') slice_endpoints = relationship('SliceEndPointModel') # lazy='joined', back_populates='slice' slice_services = relationship('SliceServiceModel') # lazy='joined', back_populates='slice' - #slice_subslices = relationship('SliceSubSliceModel') # lazy='joined', back_populates='slice' + slice_subslices = relationship( + 'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid') constraints = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='slice' config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='slice' @@ -65,11 +66,35 @@ class SliceModel(_Base): for slice_service in self.slice_services ], 'slice_subslice_ids': [ - #slice_subslice.subslice.dump_id() - #for slice_subslice in self.slice_subslices + slice_subslice.subslice.dump_id() + for slice_subslice in self.slice_subslices ], 'slice_owner': { 'owner_uuid': {'uuid': self.slice_owner_uuid}, 'owner_string': self.slice_owner_string } } + +class SliceEndPointModel(_Base): + __tablename__ = 'slice_endpoint' + + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + + slice = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints' + +class SliceServiceModel(_Base): + __tablename__ = 'slice_service' + + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True) + + slice = relationship('SliceModel', back_populates='slice_services', lazy='joined') + service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services' + +class SliceSubSliceModel(_Base): + __tablename__ = 'slice_subslice' + + slice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True) + subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True) diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index 8c59bf58a..ef1ae0be8 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -42,3 +42,21 @@ class TopologyModel(_Base): 'device_ids' : [{'device_uuid': {'uuid': td.device_uuid}} for td in self.topology_devices], 'link_ids' : [{'link_uuid' : {'uuid': tl.link_uuid }} for tl in self.topology_links ], } + +class TopologyDeviceModel(_Base): + __tablename__ = 'topology_device' + + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='CASCADE' ), primary_key=True) + + #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices' + device = relationship('DeviceModel', lazy='joined') # back_populates='topology_devices' + +class TopologyLinkModel(_Base): + __tablename__ = 'topology_link' + + topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True) + link_uuid = Column(ForeignKey('link.link_uuid', ondelete='CASCADE' ), primary_key=True) + + #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links' + link = relationship('LinkModel', lazy='joined') # back_populates='topology_links' -- GitLab From 3a1dc80b38b30d500aef7f8a5cd846dddb8478a1 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 13:32:51 +0000 Subject: [PATCH 029/158] Common: - updated policy rule object factory - updated rpc method wrapper decorator's duration buckets --- src/common/rpc_method_wrapper/Decorator.py | 9 ++++-- src/common/tools/object_factory/PolicyRule.py | 28 +++++++++++-------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/common/rpc_method_wrapper/Decorator.py b/src/common/rpc_method_wrapper/Decorator.py index 31dc4b82b..5fc814e70 100644 --- a/src/common/rpc_method_wrapper/Decorator.py +++ b/src/common/rpc_method_wrapper/Decorator.py @@ -16,7 +16,7 @@ import grpc, logging from enum import Enum from typing import Dict, List from prometheus_client import Counter, Histogram -from prometheus_client.metrics import MetricWrapperBase +from prometheus_client.metrics import MetricWrapperBase, INF from common.tools.grpc.Tools import grpc_message_to_json_string from .ServiceExceptions import ServiceException @@ -34,7 +34,12 @@ def get_counter_requests(method_name : str, request_condition : RequestCondition def get_histogram_duration(method_name : str) -> Histogram: name = '{:s}_histogram_duration'.format(method_name.replace(':', '_')) description = '{:s} histogram of request duration'.format(method_name) - return Histogram(name, description) + return Histogram(name, description, buckets=( + .005, + .01, .02, .03, .04, .05, .06, .07, .08, .09, + .1, .2, .3, .4, .5, .6, .7, .8, .9, + 1, 2, 3, 4, 5, 6, 7, 8, 9, + INF)) METRIC_TEMPLATES = { '{:s}_COUNTER_STARTED' : lambda method_name: get_counter_requests (method_name, RequestConditionEnum.STARTED), diff --git a/src/common/tools/object_factory/PolicyRule.py b/src/common/tools/object_factory/PolicyRule.py index 8702f931d..5094db2ee 100644 --- a/src/common/tools/object_factory/PolicyRule.py +++ b/src/common/tools/object_factory/PolicyRule.py @@ -15,20 +15,26 @@ import logging from typing import Dict, List, Optional from common.proto.policy_condition_pb2 import BooleanOperator +from common.proto.policy_pb2 import PolicyRuleStateEnum LOGGER = logging.getLogger(__name__) -def json_policy_rule_id(policy_rule_uuid : str) -> Dict: - return {'uuid': {'uuid': policy_rule_uuid}} +def json_policyrule_id(policyrule_uuid : str) -> Dict: + return {'uuid': {'uuid': policyrule_uuid}} -def json_policy_rule( - policy_rule_uuid : str, policy_priority : int = 1, +def json_policyrule( + policyrule_uuid : str, policy_priority : int = 1, + policy_state : PolicyRuleStateEnum = PolicyRuleStateEnum.POLICY_UNDEFINED, policy_state_message : str = '', boolean_operator : BooleanOperator = BooleanOperator.POLICYRULE_CONDITION_BOOLEAN_AND, condition_list : List[Dict] = [], action_list : List[Dict] = [], service_id : Optional[Dict] = None, device_id_list : List[Dict] = [] ) -> Dict: basic = { - 'policyRuleId': json_policy_rule_id(policy_rule_uuid), + 'policyRuleId': json_policyrule_id(policyrule_uuid), + 'policyRuleState': { + 'policyRuleState': policy_state, + 'policyRuleStateMessage': policy_state_message, + }, 'priority': policy_priority, 'conditionList': condition_list, 'booleanOperator': boolean_operator, @@ -37,12 +43,12 @@ def json_policy_rule( result = {} if service_id is not None: - policy_rule_type = 'service' - result[policy_rule_type] = {'policyRuleBasic': basic} - result[policy_rule_type]['serviceId'] = service_id + policyrule_type = 'service' + result[policyrule_type] = {'policyRuleBasic': basic} + result[policyrule_type]['serviceId'] = service_id else: - policy_rule_type = 'device' - result[policy_rule_type] = {'policyRuleBasic': basic} + policyrule_type = 'device' + result[policyrule_type] = {'policyRuleBasic': basic} - result[policy_rule_type]['deviceList'] = device_id_list + result[policyrule_type]['deviceList'] = device_id_list return result -- GitLab From f39c8d0745e297ad1fb8f39d92188975f6a1acbe Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 13:33:12 +0000 Subject: [PATCH 030/158] Compute component: - minor cosmetic changes --- .../service/rest_server/nbi_plugins/debug_api/Resources.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py index a701fd563..dcbc600de 100644 --- a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py +++ b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py @@ -21,7 +21,7 @@ from common.tools.object_factory.Connection import json_connection_id from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Device import json_device_id from common.tools.object_factory.Link import json_link_id -from common.tools.object_factory.PolicyRule import json_policy_rule_id +from common.tools.object_factory.PolicyRule import json_policyrule_id from common.tools.object_factory.Service import json_service_id from common.tools.object_factory.Slice import json_slice_id from common.tools.object_factory.Topology import json_topology_id @@ -53,7 +53,7 @@ def grpc_topology_id(context_uuid, topology_uuid): return TopologyId(**json_topology_id(topology_uuid, context_id=json_context_id(context_uuid))) def grpc_policy_rule_id(policy_rule_uuid): - return PolicyRuleId(**json_policy_rule_id(policy_rule_uuid)) + return PolicyRuleId(**json_policyrule_id(policy_rule_uuid)) class _Resource(Resource): -- GitLab From 51ac8f603249179b34d197544f310511746d6b24 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 13:37:07 +0000 Subject: [PATCH 031/158] Context component: - extended grpc-to-enum tool to support arbitrary enum item names - added PolicyRuleState enum model - misc minor comment corrections - misc import reorderings - migrated Connection model and methods - migrated PolicyRule model and methods - removed unused files --- .../service/ContextServiceServicerImpl.py | 237 ++++----------- src/context/service/Database.py | 131 -------- src/context/service/database/Connection.py | 134 +++++++++ src/context/service/database/PolicyRule.py | 129 ++++++++ src/context/service/database/Slice.py | 2 +- .../database/models/ConfigRuleModel.py | 2 +- .../database/models/ConnectionModel.py | 196 +++--------- .../database/models/ConstraintModel.py | 2 +- .../service/database/models/ContextModel.py | 2 +- .../service/database/models/DeviceModel.py | 2 +- .../service/database/models/EndPointModel.py | 2 +- .../service/database/models/LinkModel.py | 2 +- .../database/models/PolicyRuleModel.py | 66 ++++- .../service/database/models/ServiceModel.py | 2 +- .../service/database/models/TopologyModel.py | 2 +- .../database/models/enums/PolicyRuleState.py | 33 +++ .../database/models/enums/_GrpcToEnum.py | 24 +- .../service/database/uuids/Connection.py | 33 +++ .../service/database/uuids/PolicuRule.py | 29 ++ src/context/tests/Objects.py | 8 +- src/context/tests/_test_connection.py | 280 ------------------ src/context/tests/_test_policy.py | 114 ------- src/context/tests/conftest.py | 7 +- src/context/tests/test_connection.py | 251 ++++++++++++++++ src/context/tests/test_policy.py | 90 ++++++ test-context.sh | 16 +- 26 files changed, 902 insertions(+), 894 deletions(-) delete mode 100644 src/context/service/Database.py create mode 100644 src/context/service/database/Connection.py create mode 100644 src/context/service/database/PolicyRule.py create mode 100644 src/context/service/database/models/enums/PolicyRuleState.py create mode 100644 src/context/service/database/uuids/Connection.py create mode 100644 src/context/service/database/uuids/PolicuRule.py delete mode 100644 src/context/tests/_test_connection.py delete mode 100644 src/context/tests/_test_policy.py create mode 100644 src/context/tests/test_connection.py create mode 100644 src/context/tests/test_policy.py diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index d93a8f059..6ac21a973 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. - import grpc, json, logging, sqlalchemy -#from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker -#from sqlalchemy.dialects.postgresql import UUID, insert from typing import Iterator from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2 import ( @@ -27,45 +24,23 @@ from common.proto.context_pb2 import ( Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList, Slice, SliceEvent, SliceId, SliceIdList, SliceList, Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList) -#from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule +from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule from common.proto.context_pb2_grpc import ContextServiceServicer from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer -#from common.tools.object_factory.Context import json_context_id from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -#from common.rpc_method_wrapper.ServiceExceptions import ( -# InvalidArgumentException, NotFoundException, OperationFailedException) +from .database.Connection import ( + connection_delete, connection_get, connection_list_ids, connection_list_objs, connection_set) from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set +from .database.PolicyRule import ( + policyrule_delete, policyrule_get, policyrule_list_ids, policyrule_list_objs, policyrule_set) from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset from .database.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set -#from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string -#from context.service.Database import Database -#from context.service.database.ConfigModel import ( -# ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel, grpc_config_rules_to_raw, update_config) -#from context.service.database.ConnectionModel import ConnectionModel, set_path -#from context.service.database.ConstraintModel import ( -# ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS, set_constraints) -#from context.service.database.models.ContextModel import ContextModel -#from context.service.database.models.DeviceModel import ( -# DeviceModel, grpc_to_enum__device_operational_status, grpc_to_enum__device_driver) -#from context.service.database.models.EndPointModel import EndPointModel, grpc_to_enum__kpi_sample_type -#from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types -#from context.service.database.Events import notify_event -#from context.service.database.LinkModel import LinkModel -#from context.service.database.PolicyRuleModel import PolicyRuleModel -#from context.service.database.RelationModels import TopologyDeviceModel -# ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel, -# SliceSubSliceModel, TopologyLinkModel) -#from context.service.database.ServiceModel import ( -# ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type) -#from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status -#from context.service.database.TopologyModel import TopologyModel from .Constants import ( CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, #TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY) -#from .ChangeFeedClient import ChangeFeedClient LOGGER = logging.getLogger(__name__) @@ -109,14 +84,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId: - context_id,updated = context_set(self.db_engine, request) + context_id,updated = context_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id}) return context_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty: - deleted = context_delete(self.db_engine, request) + deleted = context_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_CONTEXT, EventTypeEnum.EVENTTYPE_REMOVE, {'context_id': request}) return Empty() @@ -143,14 +118,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId: - topology_id,updated = topology_set(self.db_engine, request) + topology_id,updated = topology_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id}) return topology_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: - deleted = topology_delete(self.db_engine, request) + deleted = topology_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_TOPOLOGY, EventTypeEnum.EVENTTYPE_REMOVE, {'topology_id': request}) return Empty() @@ -177,14 +152,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: - device_id,updated = device_set(self.db_engine, request) + device_id,updated = device_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id}) return device_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: - deleted = device_delete(self.db_engine, request) + deleted = device_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_DEVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'device_id': request}) return Empty() @@ -211,14 +186,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: - link_id,updated = link_set(self.db_engine, request) + link_id,updated = link_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) return link_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: - deleted = link_delete(self.db_engine, request) + deleted = link_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_LINK, EventTypeEnum.EVENTTYPE_REMOVE, {'link_id': request}) return Empty() @@ -245,14 +220,14 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: - service_id,updated = service_set(self.db_engine, request) + service_id,updated = service_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) return service_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: - deleted = service_delete(self.db_engine, request) + deleted = service_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_SERVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'service_id': request}) return Empty() @@ -279,21 +254,21 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS, LOGGER) def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: - slice_id,updated = slice_set(self.db_engine, request) + slice_id,updated = slice_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id}) return slice_id @safe_and_metered_rpc_method(METRICS, LOGGER) def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: - slice_id,updated = slice_unset(self.db_engine, request) + slice_id,updated = slice_unset(self.db_engine, request) # pylint: disable=unused-variable #if updated: # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_UPDATE, {'slice_id': slice_id}) return slice_id @safe_and_metered_rpc_method(METRICS, LOGGER) def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: - deleted = slice_delete(self.db_engine, request) + deleted = slice_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_REMOVE, {'slice_id': request}) return Empty() @@ -306,86 +281,32 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Connection ------------------------------------------------------------------------------------------------- -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: -# with self.session() as session: -# result = session.query(DeviceModel).all() -# return DeviceIdList(device_ids=[device.dump_id() for device in result]) -# -# with self.lock: -# str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) -# db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) -# db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) -# db_connections = sorted(db_connections, key=operator.attrgetter('pk')) -# return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections]) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListConnections(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: -# with self.lock: -# str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid]) -# db_service : ServiceModel = get_object(self.database, ServiceModel, str_key) -# db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel) -# db_connections = sorted(db_connections, key=operator.attrgetter('pk')) -# return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections]) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection: -# with self.lock: -# db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid) -# return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True)) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId: -# with self.lock: -# connection_uuid = request.connection_id.connection_uuid.uuid -# -# connection_attributes = {'connection_uuid': connection_uuid} -# -# service_context_uuid = request.service_id.context_id.context_uuid.uuid -# service_uuid = request.service_id.service_uuid.uuid -# if len(service_context_uuid) > 0 and len(service_uuid) > 0: -# str_service_key = key_to_str([service_context_uuid, service_uuid]) -# db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key) -# connection_attributes['service_fk'] = db_service -# -# path_hops_result = set_path(self.database, connection_uuid, request.path_hops_endpoint_ids, path_name = '') -# db_path = path_hops_result[0] -# connection_attributes['path_fk'] = db_path -# -# result : Tuple[ConnectionModel, bool] = update_or_create_object( -# self.database, ConnectionModel, connection_uuid, connection_attributes) -# db_connection, updated = result -# -# for sub_service_id in request.sub_service_ids: -# sub_service_uuid = sub_service_id.service_uuid.uuid -# sub_service_context_uuid = sub_service_id.context_id.context_uuid.uuid -# str_sub_service_key = key_to_str([sub_service_context_uuid, sub_service_uuid]) -# db_service : ServiceModel = get_object(self.database, ServiceModel, str_sub_service_key) -# -# str_connection_sub_service_key = key_to_str([connection_uuid, str_sub_service_key], separator='--') -# result : Tuple[ConnectionSubServiceModel, bool] = get_or_create_object( -# self.database, ConnectionSubServiceModel, str_connection_sub_service_key, { -# 'connection_fk': db_connection, 'sub_service_fk': db_service}) -# #db_connection_sub_service, connection_sub_service_created = result -# -# event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_connection_id = db_connection.dump_id() -# notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) -# return ConnectionId(**dict_connection_id) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty: -# with self.lock: -# db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False) -# found = db_connection.load() -# if not found: return Empty() -# -# dict_connection_id = db_connection.dump_id() -# db_connection.delete() -# -# event_type = EventTypeEnum.EVENTTYPE_REMOVE -# notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id}) -# return Empty() + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: + return connection_list_ids(self.db_engine, request) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListConnections(self, request : ContextId, context : grpc.ServicerContext) -> ConnectionList: + return connection_list_objs(self.db_engine, request) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection: + return connection_get(self.db_engine, request) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId: + connection_id,updated = connection_set(self.db_engine, request) # pylint: disable=unused-variable + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + #notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id}) + return connection_id + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty: + deleted = connection_delete(self.db_engine, request) # pylint: disable=unused-variable + #if deleted: + # event_type = EventTypeEnum.EVENTTYPE_REMOVE + # notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': request}) + return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) def GetConnectionEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: @@ -395,52 +316,24 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Policy ----------------------------------------------------------------------------------------------------- -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: -# with self.lock: -# db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) -# db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) -# return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules]) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList: -# with self.lock: -# db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) -# db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) -# return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules]) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: -# with self.lock: -# policy_rule_uuid = request.uuid.uuid -# db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid) -# return PolicyRule(**db_policy_rule.dump()) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: -# with self.lock: -# policy_rule_type = request.WhichOneof('policy_rule') -# policy_rule_json = grpc_message_to_json(request) -# policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid'] -# result: Tuple[PolicyRuleModel, bool] = update_or_create_object( -# self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)}) -# db_policy, updated = result -# -# #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE -# dict_policy_id = db_policy.dump_id() -# #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) -# return PolicyRuleId(**dict_policy_id) - -# @safe_and_metered_rpc_method(METRICS, LOGGER) -# def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty: -# with self.lock: -# policy_uuid = request.uuid.uuid -# db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False) -# found = db_policy.load() -# if not found: return Empty() -# -# dict_policy_id = db_policy.dump_id() -# db_policy.delete() -# #event_type = EventTypeEnum.EVENTTYPE_REMOVE -# #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) -# return Empty() + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: + return policyrule_list_ids(self.db_engine) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList: + return policyrule_list_objs(self.db_engine) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: + return policyrule_get(self.db_engine, request) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: + policyrule_id,updated = policyrule_set(self.db_engine, request) # pylint: disable=unused-variable + return policyrule_id + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty: + deleted = policyrule_delete(self.db_engine, request) # pylint: disable=unused-variable + return Empty() diff --git a/src/context/service/Database.py b/src/context/service/Database.py deleted file mode 100644 index edb903a10..000000000 --- a/src/context/service/Database.py +++ /dev/null @@ -1,131 +0,0 @@ -import logging -from sqlalchemy import MetaData -from sqlalchemy.orm import Session #, joinedload -from typing import Tuple #, List -from context.service.database.models._Base import _Base -#from common.orm.backend.Tools import key_to_str -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException - -LOGGER = logging.getLogger(__name__) - -class Database(Session): - def __init__(self, session): - super().__init__() - self.session = session - - def get_session(self): - return self.session - - def get_all(self, model): - result = [] - with self.session() as session: - for entry in session.query(model).all(): - result.append(entry) - - return result - - def create_or_update(self, model): - with self.session() as session: - att = getattr(model, model.main_pk_name()) - filt = {model.main_pk_name(): att} - t_model = type(model) - obj = session.query(t_model).filter_by(**filt).one_or_none() - - if obj: - for key in obj.__table__.columns.keys(): - setattr(obj, key, getattr(model, key)) - found = True - session.commit() - return obj, found - else: - found = False - session.add(model) - session.commit() - return model, found - - def create(self, model): - with self.session() as session: - session.add(model) - session.commit() - return model - - def remove(self, model, filter_d): - model_t = type(model) - with self.session() as session: - session.query(model_t).filter_by(**filter_d).delete() - session.commit() - - - def clear(self): - with self.session() as session: - engine = session.get_bind() - _Base.metadata.drop_all(engine) - _Base.metadata.create_all(engine) - - def dump_by_table(self): - with self.session() as session: - engine = session.get_bind() - meta = MetaData() - meta.reflect(engine) - result = {} - - for table in meta.sorted_tables: - result[table.name] = [dict(row) for row in engine.execute(table.select())] - LOGGER.info(result) - return result - - def dump_all(self): - with self.session() as session: - engine = session.get_bind() - meta = MetaData() - meta.reflect(engine) - result = [] - - for table in meta.sorted_tables: - for row in engine.execute(table.select()): - result.append((table.name, dict(row))) - - return result - - def get_object(self, model_class: _Base, main_key: str, raise_if_not_found=False): - filt = {model_class.main_pk_name(): main_key} - with self.session() as session: - get = session.query(model_class).filter_by(**filt).one_or_none() - - if not get: - if raise_if_not_found: - raise NotFoundException(model_class.__name__.replace('Model', ''), main_key) - - dump = None - if hasattr(get, 'dump'): - dump = get.dump() - return get, dump - - def get_object_filter(self, model_class: _Base, filt, raise_if_not_found=False): - with self.session() as session: - get = session.query(model_class).filter_by(**filt).all() - - if not get: - if raise_if_not_found: - raise NotFoundException(model_class.__name__.replace('Model', '')) - else: - return None, None - - if isinstance(get, list): - return get, [obj.dump() for obj in get] - - return get, get.dump() - - def get_or_create(self, model_class: _Base, key_parts: str, filt=None) -> Tuple[_Base, bool]: - if not filt: - filt = {model_class.main_pk_name(): key_parts} - with self.session() as session: - get = session.query(model_class).filter_by(**filt).one_or_none() - if get: - return get, False - else: - obj = model_class() - setattr(obj, model_class.main_pk_name(), key_parts) - session.add(obj) - session.commit() - return obj, True diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py new file mode 100644 index 000000000..3ab0b83bf --- /dev/null +++ b/src/context/service/database/Connection.py @@ -0,0 +1,134 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional, Tuple +from common.proto.context_pb2 import Connection, ConnectionId, ConnectionIdList, ConnectionList, ServiceId +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Connection import json_connection_id +from .models.ConnectionModel import ConnectionEndPointModel, ConnectionModel, ConnectionSubServiceModel +from .uuids.Connection import connection_get_uuid +from .uuids.EndPoint import endpoint_get_uuid +from .uuids.Service import service_get_uuid + +def connection_list_ids(db_engine : Engine, request : ServiceId) -> ConnectionIdList: + _,service_uuid = service_get_uuid(request, allow_random=False) + def callback(session : Session) -> List[Dict]: + obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all() + #.options(selectinload(ContextModel.connection)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return ConnectionIdList(connection_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + +def connection_list_objs(db_engine : Engine, request : ServiceId) -> ConnectionList: + _,service_uuid = service_get_uuid(request, allow_random=False) + def callback(session : Session) -> List[Dict]: + obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all() + #.options(selectinload(ContextModel.connection)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return ConnectionList(connections=run_transaction(sessionmaker(bind=db_engine), callback)) + +def connection_get(db_engine : Engine, request : ConnectionId) -> Connection: + connection_uuid = connection_get_uuid(request, allow_random=False) + def callback(session : Session) -> Optional[Dict]: + obj : Optional[ConnectionModel] = session.query(ConnectionModel)\ + .filter_by(connection_uuid=connection_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: + raise NotFoundException('Connection', request.connection_uuid.uuid, extra_details=[ + 'connection_uuid generated was: {:s}'.format(connection_uuid), + ]) + return Connection(**obj) + +def connection_set(db_engine : Engine, request : Connection) -> Tuple[ConnectionId, bool]: + connection_uuid = connection_get_uuid(request.connection_id, allow_random=True) + _,service_uuid = service_get_uuid(request.service_id, allow_random=False) + settings = grpc_message_to_json_string(request.settings), + + connection_data = [{ + 'connection_uuid': connection_uuid, + 'service_uuid' : service_uuid, + 'settings' : settings, + }] + + connection_endpoints_data : List[Dict] = list() + for position,endpoint_id in enumerate(request.path_hops_endpoint_ids): + _, _, endpoint_uuid = endpoint_get_uuid(endpoint_id, allow_random=False) + connection_endpoints_data.append({ + 'connection_uuid': connection_uuid, + 'endpoint_uuid' : endpoint_uuid, + 'position' : position, + }) + + connection_subservices_data : List[Dict] = list() + for i,service_id in enumerate(request.sub_service_ids): + _, service_uuid = service_get_uuid(service_id, allow_random=False) + connection_subservices_data.append({ + 'connection_uuid': connection_uuid, + 'subservice_uuid': service_uuid, + }) + + def callback(session : Session) -> None: + stmt = insert(ConnectionModel).values(connection_data) + stmt = stmt.on_conflict_do_update( + index_elements=[ConnectionModel.connection_uuid], + set_=dict(settings = stmt.excluded.settings) + ) + session.execute(stmt) + + if len(connection_endpoints_data) > 0: + stmt = insert(ConnectionEndPointModel).values(connection_endpoints_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[ConnectionEndPointModel.connection_uuid, ConnectionEndPointModel.endpoint_uuid] + ) + try: + session.execute(stmt) + except IntegrityError as e: + str_args = ''.join(e.args).replace('\n', ' ') + pattern_fkv = \ + r'\(psycopg2.errors.ForeignKeyViolation\) '\ + r'insert on table \"([^\"]+)\" violates foreign key constraint '\ + r'.+DETAIL\: Key \([^\)]+\)\=\([\'\"]*([^\)\'\"]+)[\'\"]*\) is not present in table \"([^\"]+)\"' + m_fkv = re.match(pattern_fkv, str_args) + if m_fkv is not None: + insert_table, primary_key, origin_table = m_fkv.groups() + raise NotFoundException(origin_table, primary_key, extra_details=[ + 'while inserting in table "{:s}"'.format(insert_table) + ]) from e + else: + raise + + if len(connection_subservices_data) > 0: + stmt = insert(ConnectionSubServiceModel).values(connection_subservices_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[ConnectionSubServiceModel.connection_uuid, ConnectionSubServiceModel.subservice_uuid] + ) + session.execute(stmt) + + run_transaction(sessionmaker(bind=db_engine), callback) + updated = False # TODO: improve and check if created/updated + return ConnectionId(**json_connection_id(connection_uuid)),updated + +def connection_delete(db_engine : Engine, request : ConnectionId) -> bool: + connection_uuid = connection_get_uuid(request, allow_random=False) + def callback(session : Session) -> bool: + num_deleted = session.query(ConnectionModel).filter_by(connection_uuid=connection_uuid).delete() + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py new file mode 100644 index 000000000..da8356e04 --- /dev/null +++ b/src/context/service/database/PolicyRule.py @@ -0,0 +1,129 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional, Set, Tuple +from common.proto.policy_pb2 import PolicyRule, PolicyRuleId, PolicyRuleIdList, PolicyRuleList +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.tools.grpc.Tools import grpc_message_to_json +from common.tools.object_factory.PolicyRule import json_policyrule_id +from context.service.database.uuids.Device import device_get_uuid +from .models.enums.PolicyRuleState import grpc_to_enum__policyrule_state +from .models.PolicyRuleModel import PolicyRuleDeviceModel, PolicyRuleKindEnum, PolicyRuleModel +from .uuids.PolicuRule import policyrule_get_uuid +from .uuids.Service import service_get_uuid + +def policyrule_list_ids(db_engine : Engine) -> PolicyRuleIdList: + def callback(session : Session) -> List[Dict]: + obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all() + #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump_id() for obj in obj_list] + return PolicyRuleIdList(policyRuleIdList=run_transaction(sessionmaker(bind=db_engine), callback)) + +def policyrule_list_objs(db_engine : Engine) -> PolicyRuleList: + def callback(session : Session) -> List[Dict]: + obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all() + #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() + return [obj.dump() for obj in obj_list] + return PolicyRuleList(policyRules=run_transaction(sessionmaker(bind=db_engine), callback)) + +def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule: + policyrule_uuid = policyrule_get_uuid(request, allow_random=False) + def callback(session : Session) -> Optional[Dict]: + obj : Optional[PolicyRuleModel] = session.query(PolicyRuleModel)\ + .filter_by(policyrule_uuid=policyrule_uuid).one_or_none() + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: + raw_policyrule_uuid = request.uuid.uuid + raise NotFoundException('PolicyRule', raw_policyrule_uuid, extra_details=[ + 'policyrule_uuid generated was: {:s}'.format(policyrule_uuid) + ]) + return PolicyRule(**obj) + +def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRuleId, bool]: + policyrule_kind = request.WhichOneof('policy_rule') + policyrule_spec = getattr(request, policyrule_kind) + policyrule_basic = policyrule_spec.policyRuleBasic + policyrule_id = policyrule_basic.policyRuleId + policyrule_uuid = policyrule_get_uuid(policyrule_id, allow_random=False) + + policyrule_kind = PolicyRuleKindEnum._member_map_.get(policyrule_kind.upper()) # pylint: disable=no-member + policyrule_state = grpc_to_enum__policyrule_state(policyrule_basic.policyRuleState.policyRuleState) + policyrule_state_message = policyrule_basic.policyRuleState.policyRuleStateMessage + + json_policyrule_basic = grpc_message_to_json(policyrule_basic) + policyrule_eca_data = json.dumps({ + 'conditionList': json_policyrule_basic.get('conditionList', []), + 'booleanOperator': json_policyrule_basic['booleanOperator'], + 'actionList': json_policyrule_basic.get('actionList', []), + }, sort_keys=True) + + policyrule_data = [{ + 'policyrule_uuid' : policyrule_uuid, + 'policyrule_kind' : policyrule_kind, + 'policyrule_state' : policyrule_state, + 'policyrule_state_message': policyrule_state_message, + 'policyrule_priority' : policyrule_basic.priority, + 'policyrule_eca_data' : policyrule_eca_data, + }] + + policyrule_service_uuid = None + if policyrule_kind == PolicyRuleKindEnum.SERVICE: + _,policyrule_service_uuid = service_get_uuid(policyrule_spec.serviceId, allow_random=False) + policyrule_data[0]['policyrule_service_uuid'] = policyrule_service_uuid + + device_uuids : Set[str] = set() + related_devices : List[Dict] = list() + for device_id in policyrule_spec.deviceList: + device_uuid = device_get_uuid(device_id, allow_random=False) + if device_uuid in device_uuids: continue + related_devices.append({ + 'policyrule_uuid': policyrule_uuid, + 'device_uuid' : device_uuid, + }) + device_uuids.add(device_uuid) + + def callback(session : Session) -> None: + stmt = insert(PolicyRuleModel).values(policyrule_data) + stmt = stmt.on_conflict_do_update( + index_elements=[PolicyRuleModel.policyrule_uuid], + set_=dict( + policyrule_state = stmt.excluded.policyrule_state, + policyrule_state_message = stmt.excluded.policyrule_state_message, + policyrule_priority = stmt.excluded.policyrule_priority, + policyrule_eca_data = stmt.excluded.policyrule_eca_data, + ) + ) + session.execute(stmt) + + if len(related_devices) > 0: + session.execute(insert(PolicyRuleDeviceModel).values(related_devices).on_conflict_do_nothing( + index_elements=[PolicyRuleDeviceModel.policyrule_uuid, PolicyRuleDeviceModel.device_uuid] + )) + + run_transaction(sessionmaker(bind=db_engine), callback) + updated = False # TODO: improve and check if created/updated + return PolicyRuleId(**json_policyrule_id(policyrule_uuid)),updated + +def policyrule_delete(db_engine : Engine, request : PolicyRuleId) -> bool: + policyrule_uuid = policyrule_get_uuid(request, allow_random=False) + def callback(session : Session) -> bool: + num_deleted = session.query(PolicyRuleModel).filter_by(policyrule_uuid=policyrule_uuid).delete() + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index 00b2fd24b..6566f94c5 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from sqlalchemy import and_, delete +from sqlalchemy import and_ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index 0e4b94427..c2baa8df6 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -19,7 +19,7 @@ from typing import Dict from .enums.ConfigAction import ORM_ConfigActionEnum from ._Base import _Base -# Enum values should match name of field in ConfigRuleModel +# Enum values should match name of field in ConfigRule message class ConfigRuleKindEnum(enum.Enum): CUSTOM = 'custom' ACL = 'acl' diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py index 19cafc59b..a1d45a934 100644 --- a/src/context/service/database/models/ConnectionModel.py +++ b/src/context/service/database/models/ConnectionModel.py @@ -12,175 +12,63 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, operator -from typing import Dict, List, Optional, Set, Tuple, Union -from common.orm.Database import Database -from common.orm.backend.Tools import key_to_str -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.IntegerField import IntegerField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.model.Model import Model -from common.orm.HighLevel import get_object, get_or_create_object, get_related_objects, update_or_create_object -from common.proto.context_pb2 import EndPointId -from .EndPointModel import EndPointModel -from .ServiceModel import ServiceModel - -from sqlalchemy import Column, ForeignKey #, ForeignKeyConstraint -#from sqlalchemy.dialects.postgresql import UUID +import json, logging, operator +from sqlalchemy import Column, ForeignKey, Integer, CheckConstraint, String +from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship +from typing import Dict from ._Base import _Base -def remove_dict_key(dictionary : Dict, key : str): - dictionary.pop(key, None) - return dictionary - -from sqlalchemy import Column, Enum, ForeignKey, Integer, CheckConstraint -from typing import Dict, List -from common.orm.HighLevel import get_related_objects -from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum -from .ConfigRuleModel import ConfigModel -from .ConstraintModel import ConstraintsModel -from .models.ContextModel import ContextModel -from .Tools import grpc_to_enum -from sqlalchemy.dialects.postgresql import UUID -from context.service.database.models._Base import Base -import enum -LOGGER = logging.getLogger(__name__) - LOGGER = logging.getLogger(__name__) -class PathModel(Model): # pylint: disable=abstract-method - path_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - - def delete(self) -> None: - for db_path_hop_pk,_ in self.references(PathHopModel): - PathHopModel(self.database, db_path_hop_pk).delete() - super().delete() - - def dump(self) -> List[Dict]: - db_path_hop_pks = self.references(PathHopModel) - path_hops = [PathHopModel(self.database, pk).dump(include_position=True) for pk,_ in db_path_hop_pks] - path_hops = sorted(path_hops, key=operator.itemgetter('position')) - return [remove_dict_key(path_hop, 'position') for path_hop in path_hops] +class ConnectionModel(_Base): + __tablename__ = 'connection' -class PathHopModel(Model): # pylint: disable=abstract-method - path_hop_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - path_uuid = Column(UUID(as_uuid=False), ForeignKey("Path.path_uuid")) - position = Column(Integer, CheckConstraint('position >= 0'), nullable=False) - endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid")) + connection_uuid = Column(UUID(as_uuid=False), primary_key=True) + service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False) + settings = Column(String, nullable=False) - def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ - db_endpoint : EndPointModel = EndPointModel(self.database, self.endpoint_fk) - result = db_endpoint.dump_id() - if include_position: result['position'] = self.position - return result - -class ConnectionModel(Model): - pk = PrimaryKeyField() - # connection_uuid = StringField(required=True, allow_empty=False) - connection_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True) - # service_fk = ForeignKeyField(ServiceModel, required=False) - service_uuid = Column(UUID(as_uuid=False), ForeignKey("Service.service_uuid")) - path_fk = ForeignKeyField(PathModel, required=True) - - def delete(self) -> None: - # pylint: disable=import-outside-toplevel - from .RelationModels import ConnectionSubServiceModel - - # Do not remove sub-services automatically. They are supported by real services, so Service component should - # deal with the correct removal workflow to deconfigure the devices. - for db_connection_sub_service_pk,_ in self.references(ConnectionSubServiceModel): - ConnectionSubServiceModel(self.database, db_connection_sub_service_pk).delete() - - super().delete() - PathModel(self.database, self.path_fk).delete() + connection_service = relationship('ServiceModel') # back_populates='connections' + connection_endpoints = relationship('ConnectionEndPointModel') # lazy='joined', back_populates='connection' + connection_subservices = relationship('ConnectionSubServiceModel') # lazy='joined', back_populates='connection' def dump_id(self) -> Dict: + return {'connection_uuid': {'uuid': self.connection_uuid}} + + def dump(self) -> Dict: return { - 'connection_uuid': {'uuid': self.connection_uuid}, + 'connection_id' : self.dump_id(), + 'service_id' : self.connection_service.dump_id(), + 'settings' : json.loads(self.settings), + 'path_hops_endpoint_ids': [ + c_ep.endpoint.dump_id() + for c_ep in sorted(self.connection_endpoints, key=operator.attrgetter('position')) + ], + 'sub_service_ids' : [ + c_ss.subservice.dump_id() + for c_ss in self.connection_subservices + ], } - def dump_path_hops_endpoint_ids(self) -> List[Dict]: - return PathModel(self.database, self.path_fk).dump() - - def dump_sub_service_ids(self) -> List[Dict]: - from .RelationModels import ConnectionSubServiceModel # pylint: disable=import-outside-toplevel - db_sub_services = get_related_objects(self, ConnectionSubServiceModel, 'sub_service_fk') - return [db_sub_service.dump_id() for db_sub_service in sorted(db_sub_services, key=operator.attrgetter('pk'))] - - def dump(self, include_path=True, include_sub_service_ids=True) -> Dict: # pylint: disable=arguments-differ - result = {'connection_id': self.dump_id()} - if self.service_fk is not None: - result['service_id'] = ServiceModel(self.database, self.service_fk).dump_id() - if include_path: result['path_hops_endpoint_ids'] = self.dump_path_hops_endpoint_ids() - if include_sub_service_ids: result['sub_service_ids'] = self.dump_sub_service_ids() - return result - - - - -# class ConnectionSubServiceModel(Model): -# pk = PrimaryKeyField() -# connection_fk = ForeignKeyField(ConnectionModel) -# sub_service_fk = ForeignKeyField(ServiceModel) - - - - -def set_path_hop( - database : Database, db_path : PathModel, position : int, db_endpoint : EndPointModel - ) -> Tuple[PathHopModel, bool]: - - str_path_hop_key = key_to_str([db_path.pk, db_endpoint.pk], separator=':') - result : Tuple[PathHopModel, bool] = update_or_create_object(database, PathHopModel, str_path_hop_key, { - 'path_fk': db_path, 'position': position, 'endpoint_fk': db_endpoint}) - db_path_hop, updated = result - return db_path_hop, updated - -def delete_path_hop( - database : Database, db_path : PathModel, db_path_hop_pk : str - ) -> None: - - db_path_hop : Optional[PathHopModel] = get_object(database, PathHopModel, db_path_hop_pk, raise_if_not_found=False) - if db_path_hop is None: return - db_path_hop.delete() - -def delete_all_path_hops( - database : Database, db_path : PathHopModel - ) -> None: - - db_path_hop_pks = db_path.references(PathHopModel) - for pk,_ in db_path_hop_pks: PathHopModel(database, pk).delete() - -def set_path( - database : Database, connection_uuid : str, raw_endpoint_ids : List[EndPointId], path_name : str = '' - ) -> List[Union[PathModel, PathHopModel]]: - - str_path_key = connection_uuid if len(path_name) == 0 else key_to_str([connection_uuid, path_name], separator=':') - result : Tuple[PathModel, bool] = get_or_create_object(database, PathModel, str_path_key) - db_path, created = result # pylint: disable=unused-variable - - db_path_hop_pks : Set[str] = set(map(operator.itemgetter(0), db_path.references(PathHopModel))) - db_objects : List[Tuple[Union[PathModel, PathHopModel], bool]] = [db_path] +class ConnectionEndPointModel(_Base): + __tablename__ = 'connection_endpoint' - for position,endpoint_id in enumerate(raw_endpoint_ids): - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid + connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True) + endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True) + position = Column(Integer, nullable=False) - str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid]) - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') + connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined') + endpoint = relationship('EndPointModel', lazy='joined') # back_populates='connection_endpoints' - db_endpoint : EndPointModel = get_object(database, EndPointModel, str_endpoint_key) + __table_args__ = ( + CheckConstraint(position >= 0, name='check_position_value'), + ) - result : Tuple[PathHopModel, bool] = set_path_hop(database, db_path, position, db_endpoint) - db_path_hop, updated = result # pylint: disable=unused-variable - db_objects.append(db_path_hop) - db_path_hop_pks.discard(db_path_hop.instance_key) +class ConnectionSubServiceModel(_Base): + __tablename__ = 'connection_subservice' - for db_path_hop_pk in db_path_hop_pks: delete_path_hop(database, db_path, db_path_hop_pk) + connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True) + subservice_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True) - return db_objects + connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined') + subservice = relationship('ServiceModel', lazy='joined') # back_populates='connection_subservices' diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py index 90adb9ce7..30ade508e 100644 --- a/src/context/service/database/models/ConstraintModel.py +++ b/src/context/service/database/models/ConstraintModel.py @@ -18,7 +18,7 @@ from sqlalchemy.dialects.postgresql import UUID from typing import Dict from ._Base import _Base -# Enum values should match name of field in ConstraintModel +# Enum values should match name of field in Constraint message class ConstraintKindEnum(enum.Enum): CUSTOM = 'custom' ENDPOINT_LOCATION_REGION = 'ep_loc_region' diff --git a/src/context/service/database/models/ContextModel.py b/src/context/service/database/models/ContextModel.py index ffeb10111..8dc5f545f 100644 --- a/src/context/service/database/models/ContextModel.py +++ b/src/context/service/database/models/ContextModel.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict from sqlalchemy import Column, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship +from typing import Dict from ._Base import _Base class ContextModel(_Base): diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py index 74fa70cf8..2deb688e1 100644 --- a/src/context/service/database/models/DeviceModel.py +++ b/src/context/service/database/models/DeviceModel.py @@ -13,10 +13,10 @@ # limitations under the License. import operator -from typing import Dict from sqlalchemy import Column, Enum, String from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship +from typing import Dict from .enums.DeviceDriver import ORM_DeviceDriverEnum from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum from ._Base import _Base diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py index b69b4978b..4151cfe0d 100644 --- a/src/context/service/database/models/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict from sqlalchemy import Column, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship +from typing import Dict from .enums.KpiSampleType import ORM_KpiSampleTypeEnum from ._Base import _Base diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index 950f48763..ecad01972 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict from sqlalchemy import Column, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship +from typing import Dict from ._Base import _Base class LinkModel(_Base): diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py index 7c84ea940..8fc111087 100644 --- a/src/context/service/database/models/PolicyRuleModel.py +++ b/src/context/service/database/models/PolicyRuleModel.py @@ -12,21 +12,65 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging -import json +import enum, json +from sqlalchemy import CheckConstraint, Column, Enum, ForeignKey, Integer, String +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship from typing import Dict -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -LOGGER = logging.getLogger(__name__) +from context.service.database.models.enums.PolicyRuleState import ORM_PolicyRuleStateEnum +from ._Base import _Base -class PolicyRuleModel(Model): - pk = PrimaryKeyField() - value = StringField(required=True, allow_empty=False) +# Enum values should match name of field in PolicyRule message +class PolicyRuleKindEnum(enum.Enum): + DEVICE = 'device' + SERVICE = 'service' + +class PolicyRuleModel(_Base): + __tablename__ = 'policyrule' + + policyrule_uuid = Column(UUID(as_uuid=False), primary_key=True) + policyrule_kind = Column(Enum(PolicyRuleKindEnum), nullable=False) + policyrule_state = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False) + policyrule_state_message = Column(String, nullable=False) + policyrule_priority = Column(Integer, nullable=False) + policyrule_service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True) + policyrule_eca_data = Column(String, nullable=False) + + policyrule_service = relationship('ServiceModel') # back_populates='policyrules' + policyrule_devices = relationship('PolicyRuleDeviceModel' ) # back_populates='policyrule' + + __table_args__ = ( + CheckConstraint(policyrule_priority >= 0, name='check_priority_value'), + ) def dump_id(self) -> Dict: - return {'uuid': {'uuid': self.pk}} + return {'uuid': {'uuid': self.policyrule_uuid}} def dump(self) -> Dict: - return json.loads(self.value) + # Load JSON-encoded Event-Condition-Action (ECA) model data and populate with policy basic details + policyrule_basic = json.loads(self.policyrule_eca_data) + policyrule_basic.update({ + 'policyRuleId': self.dump_id(), + 'policyRuleState': { + 'policyRuleState': self.policyrule_state.value, + 'policyRuleStateMessage': self.policyrule_state_message, + }, + 'priority': self.policyrule_priority, + }) + result = { + 'policyRuleBasic': policyrule_basic, + 'deviceList': [{'device_uuid': {'uuid': pr_d.device_uuid}} for pr_d in self.policyrule_devices], + } + if self.policyrule_kind == PolicyRuleKindEnum.SERVICE: + result['serviceId'] = self.policyrule_service.dump_id(), + return {self.policyrule_kind.value: result} + +class PolicyRuleDeviceModel(_Base): + __tablename__ = 'policyrule_device' + + policyrule_uuid = Column(ForeignKey('policyrule.policyrule_uuid', ondelete='RESTRICT'), primary_key=True) + device_uuid = Column(ForeignKey('device.device_uuid', ondelete='RESTRICT'), primary_key=True) + + #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices' + device = relationship('DeviceModel', lazy='joined') # back_populates='policyrule_devices' diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py index e1e57f4c7..7343b5ade 100644 --- a/src/context/service/database/models/ServiceModel.py +++ b/src/context/service/database/models/ServiceModel.py @@ -13,10 +13,10 @@ # limitations under the License. import operator -from typing import Dict from sqlalchemy import Column, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship +from typing import Dict from .enums.ServiceStatus import ORM_ServiceStatusEnum from .enums.ServiceType import ORM_ServiceTypeEnum from ._Base import _Base diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index ef1ae0be8..14fdaaeec 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict from sqlalchemy import Column, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship +from typing import Dict from ._Base import _Base class TopologyModel(_Base): diff --git a/src/context/service/database/models/enums/PolicyRuleState.py b/src/context/service/database/models/enums/PolicyRuleState.py new file mode 100644 index 000000000..9917b1819 --- /dev/null +++ b/src/context/service/database/models/enums/PolicyRuleState.py @@ -0,0 +1,33 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.policy_pb2 import PolicyRuleStateEnum +from ._GrpcToEnum import grpc_to_enum + +class ORM_PolicyRuleStateEnum(enum.Enum): + UNDEFINED = PolicyRuleStateEnum.POLICY_UNDEFINED # Undefined rule state + FAILED = PolicyRuleStateEnum.POLICY_FAILED # Rule failed + INSERTED = PolicyRuleStateEnum.POLICY_INSERTED # Rule is just inserted + VALIDATED = PolicyRuleStateEnum.POLICY_VALIDATED # Rule content is correct + PROVISIONED = PolicyRuleStateEnum.POLICY_PROVISIONED # Rule subscribed to Monitoring + ACTIVE = PolicyRuleStateEnum.POLICY_ACTIVE # Rule is currently active (alarm is just thrown by Monitoring) + ENFORCED = PolicyRuleStateEnum.POLICY_ENFORCED # Rule action is successfully enforced + INEFFECTIVE = PolicyRuleStateEnum.POLICY_INEFFECTIVE # The applied rule action did not work as expected + EFFECTIVE = PolicyRuleStateEnum.POLICY_EFFECTIVE # The applied rule action did work as expected + UPDATED = PolicyRuleStateEnum.POLICY_UPDATED # Operator requires a policy to change + REMOVED = PolicyRuleStateEnum.POLICY_REMOVED # Operator requires to remove a policy + +grpc_to_enum__policyrule_state = functools.partial( + grpc_to_enum, PolicyRuleStateEnum, ORM_PolicyRuleStateEnum, grpc_enum_prefix='POLICY_') diff --git a/src/context/service/database/models/enums/_GrpcToEnum.py b/src/context/service/database/models/enums/_GrpcToEnum.py index df70399f9..f4fe6c1cc 100644 --- a/src/context/service/database/models/enums/_GrpcToEnum.py +++ b/src/context/service/database/models/enums/_GrpcToEnum.py @@ -14,19 +14,25 @@ import re from enum import Enum +from typing import Optional # Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatical method to retrieve # the values it expects from strings containing the desired value symbol or its integer value, so a kind of mapping is # required. Besides, ORM Models expect Enum classes in EnumeratedFields; we create specific and conveniently defined # Enum classes to serve both purposes. -def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value): - grpc_enum_name = grpc_enum_class.Name(grpc_enum_value) - grpc_enum_prefix = orm_enum_class.__name__.upper() - #grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix) - #grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix) - #grpc_enum_prefix = grpc_enum_prefix + '_' - grpc_enum_prefix = re.sub(r'^ORM_(.+)ENUM$', r'\1_', grpc_enum_prefix) - orm_enum_name = grpc_enum_name.replace(grpc_enum_prefix, '') - orm_enum_value = orm_enum_class._member_map_.get(orm_enum_name) +def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value, grpc_enum_prefix : Optional[str] = None): + enum_name = grpc_enum_class.Name(grpc_enum_value) + + if grpc_enum_prefix is None: + grpc_enum_prefix = orm_enum_class.__name__.upper() + #grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix) + #grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix) + #grpc_enum_prefix = grpc_enum_prefix + '_' + grpc_enum_prefix = re.sub(r'^ORM_(.+)ENUM$', r'\1_', grpc_enum_prefix) + + if len(grpc_enum_prefix) > 0: + enum_name = enum_name.replace(grpc_enum_prefix, '') + + orm_enum_value = orm_enum_class._member_map_.get(enum_name) return orm_enum_value diff --git a/src/context/service/database/uuids/Connection.py b/src/context/service/database/uuids/Connection.py new file mode 100644 index 000000000..24c2e9977 --- /dev/null +++ b/src/context/service/database/uuids/Connection.py @@ -0,0 +1,33 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.context_pb2 import ConnectionId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from ._Builder import get_uuid_from_string, get_uuid_random + +def connection_get_uuid( + connection_id : ConnectionId, connection_name : str = '', allow_random : bool = False +) -> str: + connection_uuid = connection_id.connection_uuid.uuid + + if len(connection_uuid) > 0: + return get_uuid_from_string(connection_uuid) + if len(connection_name) > 0: + return get_uuid_from_string(connection_name) + if allow_random: return get_uuid_random() + + raise InvalidArgumentsException([ + ('connection_id.connection_uuid.uuid', connection_uuid), + ('name', connection_name), + ], extra_details=['At least one is required to produce a Connection UUID']) diff --git a/src/context/service/database/uuids/PolicuRule.py b/src/context/service/database/uuids/PolicuRule.py new file mode 100644 index 000000000..d5266ad11 --- /dev/null +++ b/src/context/service/database/uuids/PolicuRule.py @@ -0,0 +1,29 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.policy_pb2 import PolicyRuleId +from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException +from ._Builder import get_uuid_from_string, get_uuid_random + +def policyrule_get_uuid( + policyrule_id : PolicyRuleId, allow_random : bool = False +) -> str: + policyrule_uuid = policyrule_id.uuid.uuid + + if len(policyrule_uuid) > 0: + return get_uuid_from_string(policyrule_uuid) + if allow_random: return get_uuid_random() + + raise InvalidArgumentException( + 'policyrule_id.uuid.uuid', policyrule_uuid, extra_details=['Required to produce a PolicyRule UUID']) diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index 93dd6f2c6..19d53619c 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -25,7 +25,7 @@ from common.tools.object_factory.Link import json_link, json_link_id from common.tools.object_factory.Service import json_service_id, json_service_l3nm_planned from common.tools.object_factory.Slice import json_slice_id, json_slice from common.tools.object_factory.Topology import json_topology, json_topology_id -from common.tools.object_factory.PolicyRule import json_policy_rule, json_policy_rule_id +from common.tools.object_factory.PolicyRule import json_policyrule, json_policyrule_id # ----- Context -------------------------------------------------------------------------------------------------------- @@ -170,6 +170,6 @@ CONNECTION_R1_R3_NAME, CONNECTION_R1_R3_ID, CONNECTION_R1_R3 = compose_connectio # ----- PolicyRule ------------------------------------------------------------------------------------------------------- -POLICY_RULE_NAME = '56380225-3e40-4f74-9162-529f8dcb96a1' -POLICY_RULE_ID = json_policy_rule_id(POLICY_RULE_NAME) -POLICY_RULE = json_policy_rule(POLICY_RULE_NAME) +POLICYRULE_NAME = 'my-device-policy' +POLICYRULE_ID = json_policyrule_id(POLICYRULE_NAME) +POLICYRULE = json_policyrule(POLICYRULE_NAME, policy_priority=1) diff --git a/src/context/tests/_test_connection.py b/src/context/tests/_test_connection.py deleted file mode 100644 index b6060df68..000000000 --- a/src/context/tests/_test_connection.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy, grpc, pytest -from typing import Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID -from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, - EventTypeEnum, Service, ServiceEvent, ServiceId, Topology, TopologyEvent, TopologyId) -from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from .Objects import ( - CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, - DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, SERVICE_R1_R2, - SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, - SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID) - -def grpc_connection( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - Session = context_db_mb[0] - - database = Database(Session) - - # ----- Clean the database ----------------------------------------------------------------------------------------- - database.clear() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector(context_client_grpc) - events_collector.start() - - # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - response = context_client_grpc.SetContext(Context(**CONTEXT)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetTopology(Topology(**TOPOLOGY)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R1)) - assert response.device_uuid.uuid == DEVICE_R1_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R2)) - assert response.device_uuid.uuid == DEVICE_R2_UUID - - response = context_client_grpc.SetDevice(Device(**DEVICE_R3)) - assert response.device_uuid.uuid == DEVICE_R3_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R2)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R2_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R2_R3)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R2_R3_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R2_R3_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - response = context_client_grpc.SetService(Service(**SERVICE_R1_R3)) - assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_uuid.uuid == SERVICE_R1_R3_UUID - - CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT) - CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R3_ID) - response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE)) - assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - events = events_collector.get_events(block=True, count=11) - - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[4].device_id.device_uuid.uuid == DEVICE_R3_UUID - - assert isinstance(events[5], ServiceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[5].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[5].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[6], ContextEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[6].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[7], ServiceEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[7].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[7].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID - - assert isinstance(events[8], ContextEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - assert isinstance(events[9], ServiceEvent) - assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[9].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[9].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - - assert isinstance(events[10], ContextEvent) - assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert events[10].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'Connection({:s}) not found'.format(CONNECTION_R1_R3_UUID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connection_ids) == 0 - - response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connections) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 187 - - # ----- Create the object ------------------------------------------------------------------------------------------ - with pytest.raises(grpc.RpcError) as e: - WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3) - WRONG_CONNECTION['path_hops_endpoint_ids'][0]\ - ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' - context_client_grpc.SetConnection(Connection(**WRONG_CONNECTION)) - assert e.value.code() == grpc.StatusCode.NOT_FOUND - # TODO: should we check that all endpoints belong to same topology? - # TODO: should we check that endpoints form links over the topology? - msg = 'EndPoint({:s}/{:s}:wrong-context-uuid/{:s}) not found'.format( - DEVICE_R1_UUID, WRONG_CONNECTION['path_hops_endpoint_ids'][0]['endpoint_uuid']['uuid'], DEFAULT_TOPOLOGY_UUID) - assert e.value.details() == msg - - response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) - assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3)) - assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 203 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - assert response.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert response.service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - assert len(response.path_hops_endpoint_ids) == 6 - assert len(response.sub_service_ids) == 2 - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connection_ids) == 1 - assert response.connection_ids[0].connection_uuid.uuid == CONNECTION_R1_R3_UUID - - response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) - assert len(response.connections) == 1 - assert response.connections[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - assert len(response.connections[0].path_hops_endpoint_ids) == 6 - assert len(response.connections[0].sub_service_ids) == 2 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R2_R3_ID)) - context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID)) - context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R3_ID)) - context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID)) - context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - events = events_collector.get_events(block=True, count=9) - - assert isinstance(events[0], ConnectionEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID - - assert isinstance(events[1], ServiceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[1].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID - - assert isinstance(events[2], ServiceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[2].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID - - assert isinstance(events[3], ServiceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[3].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID - - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].device_id.device_uuid.uuid == DEVICE_R1_UUID - - assert isinstance(events[5], DeviceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[5].device_id.device_uuid.uuid == DEVICE_R2_UUID - - assert isinstance(events[6], DeviceEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[6].device_id.device_uuid.uuid == DEVICE_R3_UUID - - assert isinstance(events[7], TopologyEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[7].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - assert events[7].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID - - assert isinstance(events[8], ContextEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 diff --git a/src/context/tests/_test_policy.py b/src/context/tests/_test_policy.py deleted file mode 100644 index e416575f7..000000000 --- a/src/context/tests/_test_policy.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import grpc, pytest -from typing import Tuple -from common.proto.context_pb2 import Empty -from common.proto.policy_pb2 import PolicyRuleId, PolicyRule -from context.client.ContextClient import ContextClient -#from context.client.EventsCollector import EventsCollector -from .Objects import POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID - -def grpc_policy( - context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - context_database = context_db_mb[0] - - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() - - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client_grpc) - #events_collector.start() - - # ----- Get when the object does not exist ------------------------------------------------------------------------- - POLICY_ID = 'no-uuid' - DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}} - - with pytest.raises(grpc.RpcError) as e: - context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID)) - - assert e.value.code() == grpc.StatusCode.NOT_FOUND - assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID) - - # ----- List when the object does not exist ------------------------------------------------------------------------ - response = context_client_grpc.ListPolicyRuleIds(Empty()) - assert len(response.policyRuleIdList) == 0 - - response = context_client_grpc.ListPolicyRules(Empty()) - assert len(response.policyRules) == 0 - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - - # ----- Create the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) - assert response.uuid.uuid == POLICY_RULE_UUID - - # ----- Check create event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=1) - # assert isinstance(events[0], PolicyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID - - # ----- Update the object ------------------------------------------------------------------------------------------ - response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) - assert response.uuid.uuid == POLICY_RULE_UUID - - # ----- Dump state of database after create/update the object ------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 2 - - # ----- Get when the object exists --------------------------------------------------------------------------------- - response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID)) - assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID - - # ----- List when the object exists -------------------------------------------------------------------------------- - response = context_client_grpc.ListPolicyRuleIds(Empty()) - assert len(response.policyRuleIdList) == 1 - assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID - - response = context_client_grpc.ListPolicyRules(Empty()) - assert len(response.policyRules) == 1 - - # ----- Remove the object ------------------------------------------------------------------------------------------ - context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID)) - - # ----- Check remove event ----------------------------------------------------------------------------------------- - # events = events_collector.get_events(block=True, count=2) - - # assert isinstance(events[0], PolicyEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID - - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - # ----- Dump state of database after remove the object ------------------------------------------------------------- - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py index f5ef4efca..38e488af4 100644 --- a/src/context/tests/conftest.py +++ b/src/context/tests/conftest.py @@ -25,7 +25,6 @@ from common.message_broker.Factory import get_messagebroker_backend, BackendEnum from common.message_broker.MessageBroker import MessageBroker from context.client.ContextClient import ContextClient from context.service.ContextService import ContextService -from context.service.Database import Database from context.service.Engine import Engine from context.service.database.models._Base import rebuild_database @@ -51,7 +50,9 @@ def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]: RAW_METRICS = dict() @pytest.fixture(scope='session') -def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name +def context_service( + context_db_mb : Tuple[sqlalchemy.engine.Engine, MessageBroker] # pylint: disable=redefined-outer-name +): global RAW_METRICS # pylint: disable=global-statement _service = ContextService(context_db_mb[0], context_db_mb[1]) RAW_METRICS = _service.context_servicer._get_metrics() @@ -93,7 +94,7 @@ def pytest_terminal_summary( return float(str_duration.replace(' ms', '')) field_names = ['Method', 'TOT', 'OK', 'ERR', 'avg(Dur)'] - bucket_bounds = sorted(bucket_bounds, key=lambda b: float(b)) + bucket_bounds = sorted(bucket_bounds, key=float) # convert buckets to float to get the key bucket_column_names = ['<={:s}'.format(bucket_bound) for bucket_bound in bucket_bounds] field_names.extend(bucket_column_names) diff --git a/src/context/tests/test_connection.py b/src/context/tests/test_connection.py new file mode 100644 index 000000000..f28fde356 --- /dev/null +++ b/src/context/tests/test_connection.py @@ -0,0 +1,251 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from common.proto.context_pb2 import ( + Connection, ConnectionId, Context, ContextId, Device, DeviceId, EndPointId, Service, ServiceId, Topology, TopologyId) +from context.client.ContextClient import ContextClient +from context.service.database.uuids.Connection import connection_get_uuid +from context.service.database.uuids.EndPoint import endpoint_get_uuid +#from context.client.EventsCollector import EventsCollector +from .Objects import ( + CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_NAME, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, + DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R3, SERVICE_R1_R3_ID, + SERVICE_R2_R3, SERVICE_R2_R3_ID, TOPOLOGY, TOPOLOGY_ID) + +#@pytest.mark.depends(on=['context/tests/test_service.py::test_service', 'context/tests/test_slice.py::test_slice']) +def test_connection(context_client : ContextClient) -> None: + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = True) + #events_collector.start() + + # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- + response = context_client.SetContext(Context(**CONTEXT)) + context_uuid = response.context_uuid.uuid + + response = context_client.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == context_uuid + topology_uuid = response.topology_uuid.uuid + + response = context_client.SetDevice(Device(**DEVICE_R1)) + device_r1_uuid = response.device_uuid.uuid + + response = context_client.SetDevice(Device(**DEVICE_R2)) + device_r2_uuid = response.device_uuid.uuid # pylint: disable=unused-variable + + response = context_client.SetDevice(Device(**DEVICE_R3)) + device_r3_uuid = response.device_uuid.uuid # pylint: disable=unused-variable + + response = context_client.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == context_uuid + service_r1_r2_uuid = response.service_uuid.uuid # pylint: disable=unused-variable + + response = context_client.SetService(Service(**SERVICE_R2_R3)) + assert response.context_id.context_uuid.uuid == context_uuid + service_r2_r3_uuid = response.service_uuid.uuid # pylint: disable=unused-variable + + response = context_client.SetService(Service(**SERVICE_R1_R3)) + assert response.context_id.context_uuid.uuid == context_uuid + service_r1_r3_uuid = response.service_uuid.uuid + + #events = events_collector.get_events(block=True, count=8) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[2], DeviceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[2].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[3].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[4], DeviceEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[4].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[5], ServiceEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[5].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[5].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[6], ContextEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert events[6].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[7], ServiceEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[7].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[7].service_id.service_uuid.uuid == service_r2_r3_uuid + #assert isinstance(events[8], ContextEvent) + #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert events[8].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[9], ServiceEvent) + #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[9].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[9].service_id.service_uuid.uuid == service_r1_r3_uuid + #assert isinstance(events[10], ContextEvent) + #assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert events[10].context_id.context_uuid.uuid == context_uuid + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + connection_id = ConnectionId(**CONNECTION_R1_R3_ID) + connection_uuid = connection_get_uuid(connection_id, allow_random=False) + with pytest.raises(grpc.RpcError) as e: + context_client.GetConnection(connection_id) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + MSG = 'Connection({:s}) not found; connection_uuid generated was: {:s}' + assert e.value.details() == MSG.format(CONNECTION_R1_R3_NAME, connection_uuid) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 0 + + response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + with pytest.raises(grpc.RpcError) as e: + WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3) + WRONG_CONNECTION['path_hops_endpoint_ids'][0]\ + ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid' + context_client.SetConnection(Connection(**WRONG_CONNECTION)) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + wrong_endpoint_id = EndPointId(**WRONG_CONNECTION['path_hops_endpoint_ids'][0]) + _,_,wrong_endpoint_uuid = endpoint_get_uuid(wrong_endpoint_id, allow_random=False) + msg = 'endpoint({:s}) not found; while inserting in table "connection_endpoint"'.format(wrong_endpoint_uuid) + assert e.value.details() == msg + # TODO: should we check that all endpoints belong to same topology? + # TODO: should we check that endpoints form links over the topology? + + response = context_client.SetConnection(Connection(**CONNECTION_R1_R3)) + connection_r1_r3_uuid = response.connection_uuid.uuid + + # ----- Check create event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, ConnectionEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + assert response.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + assert response.service_id.context_id.context_uuid.uuid == context_uuid + assert response.service_id.service_uuid.uuid == service_r1_r3_uuid + assert len(response.path_hops_endpoint_ids) == 6 + assert len(response.sub_service_ids) == 2 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 1 + assert response.connection_ids[0].connection_uuid.uuid == connection_r1_r3_uuid + + response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 1 + assert response.connections[0].connection_id.connection_uuid.uuid == connection_r1_r3_uuid + assert len(response.connections[0].path_hops_endpoint_ids) == 6 + assert len(response.connections[0].sub_service_ids) == 2 + + # ----- Update the object ------------------------------------------------------------------------------------------ + # TODO: change something... path? subservices? + response = context_client.SetConnection(Connection(**CONNECTION_R1_R3)) + assert response.connection_uuid.uuid == connection_r1_r3_uuid + + # ----- Check update event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, ConnectionEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + + # ----- Get when the object is modified ---------------------------------------------------------------------------- + response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + assert response.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + assert response.service_id.context_id.context_uuid.uuid == context_uuid + assert response.service_id.service_uuid.uuid == service_r1_r3_uuid + assert len(response.path_hops_endpoint_ids) == 6 + assert len(response.sub_service_ids) == 2 + + # ----- List when the object is modified --------------------------------------------------------------------------- + response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 1 + assert response.connection_ids[0].connection_uuid.uuid == connection_r1_r3_uuid + + response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 1 + assert response.connections[0].connection_id.connection_uuid.uuid == connection_r1_r3_uuid + assert len(response.connections[0].path_hops_endpoint_ids) == 6 + assert len(response.connections[0].sub_service_ids) == 2 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + #event = events_collector.get_event(block=True) + #assert isinstance(event, ConnectionEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connection_ids) == 0 + + response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID)) + assert len(response.connections) == 0 + + # ----- Clean dependencies used in the test and capture related events --------------------------------------------- + context_client.RemoveService(ServiceId(**SERVICE_R1_R3_ID)) + context_client.RemoveService(ServiceId(**SERVICE_R2_R3_ID)) + context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID)) + context_client.RemoveDevice(DeviceId(**DEVICE_R3_ID)) + context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) + context_client.RemoveContext(ContextId(**CONTEXT_ID)) + + #events = events_collector.get_events(block=True, count=8) + #assert isinstance(events[0], ServiceEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[0].service_id.service_uuid.uuid == service_r1_r3_uuid + #assert isinstance(events[1], ServiceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid + #assert isinstance(events[2], ServiceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[2].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[4], DeviceEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[4].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[5], DeviceEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[5].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[6], TopologyEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[6].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[6].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[7], ContextEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[7].context_id.context_uuid.uuid == context_uuid + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + #events_collector.stop() diff --git a/src/context/tests/test_policy.py b/src/context/tests/test_policy.py new file mode 100644 index 000000000..f9bf5ef6d --- /dev/null +++ b/src/context/tests/test_policy.py @@ -0,0 +1,90 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, pytest +from common.proto.context_pb2 import Empty +from common.proto.policy_pb2 import PolicyRuleId, PolicyRule +from context.client.ContextClient import ContextClient +from context.service.database.uuids.PolicuRule import policyrule_get_uuid +from .Objects import POLICYRULE, POLICYRULE_ID, POLICYRULE_NAME + +@pytest.mark.depends(on=['context/tests/test_device.py::test_device', 'context/tests/test_service.py::test_service']) +def test_policy(context_client : ContextClient): + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + policyrule_id = PolicyRuleId(**POLICYRULE_ID) + policyrule_uuid = policyrule_get_uuid(policyrule_id, allow_random=False) + + with pytest.raises(grpc.RpcError) as e: + context_client.GetPolicyRule(policyrule_id) + assert e.value.code() == grpc.StatusCode.NOT_FOUND + MSG = 'PolicyRule({:s}) not found; policyrule_uuid generated was: {:s}' + assert e.value.details() == MSG.format(POLICYRULE_NAME, policyrule_uuid) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 0 + + response = context_client.ListPolicyRules(Empty()) + assert len(response.policyRules) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client.SetPolicyRule(PolicyRule(**POLICYRULE)) + assert response.uuid.uuid == policyrule_uuid + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client.GetPolicyRule(PolicyRuleId(**POLICYRULE_ID)) + assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid + assert response.device.policyRuleBasic.priority == 1 + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 1 + assert response.policyRuleIdList[0].uuid.uuid == policyrule_uuid + + response = context_client.ListPolicyRules(Empty()) + assert len(response.policyRules) == 1 + assert response.policyRules[0].device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid + assert response.policyRules[0].device.policyRuleBasic.priority == 1 + + # ----- Update the object ------------------------------------------------------------------------------------------ + new_policy_priority = 100 + POLICYRULE_UPDATED = copy.deepcopy(POLICYRULE) + POLICYRULE_UPDATED['device']['policyRuleBasic']['priority'] = new_policy_priority + response = context_client.SetPolicyRule(PolicyRule(**POLICYRULE_UPDATED)) + assert response.uuid.uuid == policyrule_uuid + + # ----- Get when the object is modified ---------------------------------------------------------------------------- + response = context_client.GetPolicyRule(PolicyRuleId(**POLICYRULE_ID)) + assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid + + # ----- List when the object is modified --------------------------------------------------------------------------- + response = context_client.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 1 + assert response.policyRuleIdList[0].uuid.uuid == policyrule_uuid + + response = context_client.ListPolicyRules(Empty()) + assert len(response.policyRules) == 1 + assert response.policyRules[0].device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid + assert response.policyRules[0].device.policyRuleBasic.priority == new_policy_priority + + # ----- Remove the object ------------------------------------------------------------------------------------------ + context_client.RemovePolicyRule(PolicyRuleId(**POLICYRULE_ID)) + + # ----- List after deleting the object ----------------------------------------------------------------------------- + response = context_client.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 0 + + response = context_client.ListPolicyRules(Empty()) + assert len(response.policyRules) == 0 diff --git a/test-context.sh b/test-context.sh index a33b1e7dc..212ce5bbe 100755 --- a/test-context.sh +++ b/test-context.sh @@ -41,13 +41,15 @@ export PYTHONPATH=/home/tfs/tfs-ctrl/src # Run unitary tests and analyze coverage of code at same time # helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ - context/tests/test_hasher.py \ - context/tests/test_context.py \ - context/tests/test_topology.py \ - context/tests/test_device.py \ - context/tests/test_link.py \ - context/tests/test_service.py \ - context/tests/test_slice.py + context/tests/test_hasher.py \ + context/tests/test_context.py \ + context/tests/test_topology.py \ + context/tests/test_device.py \ + context/tests/test_link.py \ + context/tests/test_service.py \ + context/tests/test_slice.py \ + context/tests/test_connection.py \ + context/tests/test_policy.py echo echo "Coverage report:" -- GitLab From fb1c48a7c98973b42b59401cffd1d4e313542537 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 15:19:18 +0000 Subject: [PATCH 032/158] Context Component: - updated to new Method Wrapper API --- .../service/ContextServiceServicerImpl.py | 114 ++++++++---------- 1 file changed, 51 insertions(+), 63 deletions(-) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 6ac21a973..7e7226570 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -27,7 +27,7 @@ from common.proto.context_pb2 import ( from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule from common.proto.context_pb2_grpc import ContextServiceServicer from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from .database.Connection import ( connection_delete, connection_get, connection_list_ids, connection_list_objs, connection_set) from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set @@ -44,19 +44,7 @@ from .Constants import ( LOGGER = logging.getLogger(__name__) -SERVICE_NAME = 'Context' -METHOD_NAMES = [ - 'ListConnectionIds', 'ListConnections', 'GetConnection', 'SetConnection', 'RemoveConnection', 'GetConnectionEvents', - 'ListContextIds', 'ListContexts', 'GetContext', 'SetContext', 'RemoveContext', 'GetContextEvents', - 'ListTopologyIds', 'ListTopologies', 'GetTopology', 'SetTopology', 'RemoveTopology', 'GetTopologyEvents', - 'ListDeviceIds', 'ListDevices', 'GetDevice', 'SetDevice', 'RemoveDevice', 'GetDeviceEvents', - 'ListLinkIds', 'ListLinks', 'GetLink', 'SetLink', 'RemoveLink', 'GetLinkEvents', - 'ListServiceIds', 'ListServices', 'GetService', 'SetService', 'RemoveService', 'GetServiceEvents', - 'ListSliceIds', 'ListSlices', 'GetSlice', 'SetSlice', 'RemoveSlice', 'GetSliceEvents', - 'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule', - 'UnsetService', 'UnsetSlice', -] -METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +METRICS_POOL = MetricsPool('Context', 'RPC') class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer): def __init__(self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker) -> None: @@ -65,38 +53,38 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer self.messagebroker = messagebroker LOGGER.debug('Servicer Created') - def _get_metrics(self): return METRICS + def _get_metrics(self): return METRICS_POOL # ----- Context ---------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListContextIds(self, request : Empty, context : grpc.ServicerContext) -> ContextIdList: return context_list_ids(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListContexts(self, request : Empty, context : grpc.ServicerContext) -> ContextList: return context_list_objs(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetContext(self, request : ContextId, context : grpc.ServicerContext) -> Context: return context_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId: context_id,updated = context_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id}) return context_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty: deleted = context_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_CONTEXT, EventTypeEnum.EVENTTYPE_REMOVE, {'context_id': request}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): yield ContextEvent(**json.loads(message.content)) @@ -104,33 +92,33 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Topology --------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListTopologyIds(self, request : ContextId, context : grpc.ServicerContext) -> TopologyIdList: return topology_list_ids(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListTopologies(self, request : ContextId, context : grpc.ServicerContext) -> TopologyList: return topology_list_objs(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Topology: return topology_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId: topology_id,updated = topology_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id}) return topology_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: deleted = topology_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_TOPOLOGY, EventTypeEnum.EVENTTYPE_REMOVE, {'topology_id': request}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetTopologyEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT): yield TopologyEvent(**json.loads(message.content)) @@ -138,33 +126,33 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Device ----------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListDeviceIds(self, request : Empty, context : grpc.ServicerContext) -> DeviceIdList: return device_list_ids(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListDevices(self, request : Empty, context : grpc.ServicerContext) -> DeviceList: return device_list_objs(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetDevice(self, request : ContextId, context : grpc.ServicerContext) -> Device: return device_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: device_id,updated = device_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id}) return device_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: deleted = device_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_DEVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'device_id': request}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT): yield DeviceEvent(**json.loads(message.content)) @@ -172,33 +160,33 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Link ------------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: return link_list_ids(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: return link_list_objs(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: return link_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: link_id,updated = link_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) return link_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: deleted = link_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_LINK, EventTypeEnum.EVENTTYPE_REMOVE, {'link_id': request}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT): yield LinkEvent(**json.loads(message.content)) @@ -206,33 +194,33 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Service ---------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList: return service_list_ids(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: return service_list_objs(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service: return service_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: service_id,updated = service_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) return service_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: deleted = service_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_SERVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'service_id': request}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT): yield ServiceEvent(**json.loads(message.content)) @@ -240,40 +228,40 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Slice ---------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList: return slice_list_ids(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList: return slice_list_objs(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice: return slice_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: slice_id,updated = slice_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id}) return slice_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: slice_id,updated = slice_unset(self.db_engine, request) # pylint: disable=unused-variable #if updated: # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_UPDATE, {'slice_id': slice_id}) return slice_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: deleted = slice_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_REMOVE, {'slice_id': request}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT): yield SliceEvent(**json.loads(message.content)) @@ -281,26 +269,26 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Connection ------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: return connection_list_ids(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListConnections(self, request : ContextId, context : grpc.ServicerContext) -> ConnectionList: return connection_list_objs(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection: return connection_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId: connection_id,updated = connection_set(self.db_engine, request) # pylint: disable=unused-variable #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE #notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id}) return connection_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty: deleted = connection_delete(self.db_engine, request) # pylint: disable=unused-variable #if deleted: @@ -308,7 +296,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': request}) return Empty() - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetConnectionEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): yield ConnectionEvent(**json.loads(message.content)) @@ -316,24 +304,24 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer # ----- Policy ----------------------------------------------------------------------------------------------------- - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: return policyrule_list_ids(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList: return policyrule_list_objs(self.db_engine) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: return policyrule_get(self.db_engine, request) - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: policyrule_id,updated = policyrule_set(self.db_engine, request) # pylint: disable=unused-variable return policyrule_id - @safe_and_metered_rpc_method(METRICS, LOGGER) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty: deleted = policyrule_delete(self.db_engine, request) # pylint: disable=unused-variable return Empty() -- GitLab From d1c139183bfc6a242613bb53abb612fe5c3f203e Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 15:54:17 +0000 Subject: [PATCH 033/158] Context component: - repositioned Database Engine class - updated CI/CD pipeline --- src/context/.gitlab-ci.yml | 30 ++++++++++++++++---- src/context/service/__main__.py | 8 +++--- src/context/service/{ => database}/Engine.py | 0 src/context/tests/conftest.py | 2 +- 4 files changed, 29 insertions(+), 11 deletions(-) rename src/context/service/{ => database}/Engine.py (100%) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 0da2b582e..ef780f7e3 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -49,22 +49,40 @@ unit test context: before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi - - if docker container ls | grep redis; then docker rm -f redis; else echo "redis image is not in the system"; fi + - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi + - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi script: - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - - docker pull "redis:6.2" - - docker run --name redis -d --network=teraflowbridge redis:6.2 + - docker pull "cockroachdb/cockroach:latest-v22.2" + - docker volume create crdb + - > + docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 + --env COCKROACH_DATABASE=tfs_test + --env COCKROACH_USER=tfs + --env COCKROACH_PASSWORD=tfs123 + --volume "crdb:/cockroach/cockroach-data" + --volume "~/init-scripts:/docker-entrypoint-initdb.d" + cockroachdb/cockroach:latest-v22.2 start-single-node - sleep 10 - - docker run --name $IMAGE_NAME -d -p 1010:1010 --env "DB_BACKEND=redis" --env "REDIS_SERVICE_HOST=redis" --env "REDIS_SERVICE_PORT=6379" --env "REDIS_DATABASE_ID=0" -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - > + docker run --name $IMAGE_NAME -d -p 1010:1010 + --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - docker ps -a - docker logs $IMAGE_NAME - - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml" + - > + docker exec -i $IMAGE_NAME bash -c + "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/*.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml" - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: - docker rm -f $IMAGE_NAME - - docker rm -f redis + - docker rm -f crdb + - docker volume rm -f crdb - docker network rm teraflowbridge rules: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index fbdabb2d7..9960e94b5 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -17,10 +17,9 @@ from prometheus_client import start_http_server from common.Settings import get_log_level, get_metrics_port from common.message_broker.Factory import get_messagebroker_backend from common.message_broker.MessageBroker import MessageBroker -from sqlalchemy.orm import sessionmaker -from .database import rebuild_database from .ContextService import ContextService -from .Engine import Engine +from .database.Engine import Engine +from .database.models._Base import rebuild_database LOG_LEVEL = get_log_level() logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") @@ -46,7 +45,8 @@ def main(): start_http_server(metrics_port) db_engine = Engine.get_engine() - rebuild_database(db_engine, drop_if_exists=False) + Engine.create_database(db_engine) + rebuild_database(db_engine) # Get message broker instance messagebroker = MessageBroker(get_messagebroker_backend()) diff --git a/src/context/service/Engine.py b/src/context/service/database/Engine.py similarity index 100% rename from src/context/service/Engine.py rename to src/context/service/database/Engine.py diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py index 38e488af4..dc54c8cdc 100644 --- a/src/context/tests/conftest.py +++ b/src/context/tests/conftest.py @@ -25,7 +25,7 @@ from common.message_broker.Factory import get_messagebroker_backend, BackendEnum from common.message_broker.MessageBroker import MessageBroker from context.client.ContextClient import ContextClient from context.service.ContextService import ContextService -from context.service.Engine import Engine +from context.service.database.Engine import Engine from context.service.database.models._Base import rebuild_database LOCAL_HOST = '127.0.0.1' -- GitLab From a6c1c53df94dfc87ac5b8343ffa91a623f2ec53f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 16:05:46 +0000 Subject: [PATCH 034/158] Context component: - corrected requirements - updated CI/CD pipeline --- common_requirements.in | 2 ++ src/context/.gitlab-ci.yml | 3 +-- src/context/requirements.in | 6 ------ 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/common_requirements.in b/common_requirements.in index 772c1115d..c255e6d9f 100644 --- a/common_requirements.in +++ b/common_requirements.in @@ -2,8 +2,10 @@ coverage==6.3 grpcio==1.47.* grpcio-health-checking==1.47.* grpcio-tools==1.47.* +prettytable==3.5.0 prometheus-client==0.13.0 protobuf==3.20.* pytest==6.2.5 pytest-benchmark==3.4.1 python-dateutil==2.8.2 +pytest-depends==1.0.1 diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index ef780f7e3..9004d7dcd 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -51,7 +51,7 @@ unit test context: - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi - - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi + - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi script: - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - docker pull "cockroachdb/cockroach:latest-v22.2" @@ -62,7 +62,6 @@ unit test context: --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 --volume "crdb:/cockroach/cockroach-data" - --volume "~/init-scripts:/docker-entrypoint-initdb.d" cockroachdb/cockroach:latest-v22.2 start-single-node - sleep 10 - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") diff --git a/src/context/requirements.in b/src/context/requirements.in index f5d5ccbe2..83ae02faf 100644 --- a/src/context/requirements.in +++ b/src/context/requirements.in @@ -1,10 +1,4 @@ -Flask==2.1.3 -Flask-RESTful==0.3.9 psycopg2-binary==2.9.3 -pytest-depends==1.0.1 -redis==4.1.2 -requests==2.27.1 SQLAlchemy==1.4.40 sqlalchemy-cockroachdb==1.4.3 SQLAlchemy-Utils==0.38.3 -prettytable==3.5.0 -- GitLab From 82dacd4a559bb0f4a6287368fcff769ddefa9f09 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 16:23:10 +0000 Subject: [PATCH 035/158] Context component: - updated CI/CD pipeline --- .gitlab-ci.yml | 4 ++-- manifests/.gitlab-ci.yml | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index dac76342a..8e26a1644 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,7 +14,7 @@ # stages of the cicd pipeline stages: - - dependencies + #- dependencies - build - test - unit_test @@ -24,7 +24,7 @@ stages: # include the individual .gitlab-ci.yml of each micro-service include: - - local: '/manifests/.gitlab-ci.yml' + #- local: '/manifests/.gitlab-ci.yml' - local: '/src/monitoring/.gitlab-ci.yml' - local: '/src/compute/.gitlab-ci.yml' - local: '/src/context/.gitlab-ci.yml' diff --git a/manifests/.gitlab-ci.yml b/manifests/.gitlab-ci.yml index d20b67e53..9ce323c58 100644 --- a/manifests/.gitlab-ci.yml +++ b/manifests/.gitlab-ci.yml @@ -14,10 +14,10 @@ # Deployment of the dependency services in Kubernetes Cluster -dependencies all: - stage: dependencies - script: - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/prometheus.yaml" - - kubectl get all +#dependencies all: +# stage: dependencies +# script: +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/prometheus.yaml" +# - kubectl get all -- GitLab From eef61a0c4d51a25f91c929a58ed65d327f71760f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 16:25:35 +0000 Subject: [PATCH 036/158] Context component: - updated CI/CD pipeline --- src/context/.gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 9004d7dcd..549e53798 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -75,7 +75,7 @@ unit test context: - docker logs $IMAGE_NAME - > docker exec -i $IMAGE_NAME bash -c - "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/*.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml" + "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py" - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: -- GitLab From 19625c3ef50c2a734c8aaddedc970f45069d7066 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 16:35:20 +0000 Subject: [PATCH 037/158] Context component: - updated CI/CD pipeline --- src/context/.gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 549e53798..468566701 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -72,6 +72,7 @@ unit test context: --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - docker ps -a + - sleep 10 - docker logs $IMAGE_NAME - > docker exec -i $IMAGE_NAME bash -c -- GitLab From 50fa943c58c45621f5bdb84736a6314f2e7990c5 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 16:48:00 +0000 Subject: [PATCH 038/158] Context component: - disabled Redis MessageBroker backend --- src/common/message_broker/Factory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/common/message_broker/Factory.py b/src/common/message_broker/Factory.py index a64913df0..c5d48f9e1 100644 --- a/src/common/message_broker/Factory.py +++ b/src/common/message_broker/Factory.py @@ -17,13 +17,13 @@ from typing import Optional, Union from .backend._Backend import _Backend from .backend.BackendEnum import BackendEnum from .backend.inmemory.InMemoryBackend import InMemoryBackend -from .backend.redis.RedisBackend import RedisBackend +#from .backend.redis.RedisBackend import RedisBackend LOGGER = logging.getLogger(__name__) BACKENDS = { BackendEnum.INMEMORY.value: InMemoryBackend, - BackendEnum.REDIS.value: RedisBackend, + #BackendEnum.REDIS.value: RedisBackend, #BackendEnum.KAFKA.value: KafkaBackend, #BackendEnum.RABBITMQ.value: RabbitMQBackend, #BackendEnum.ZEROMQ.value: ZeroMQBackend, -- GitLab From af0ad67a1ddce877fa020cb5a9f4f60802e0e9b5 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 17:07:58 +0000 Subject: [PATCH 039/158] Common - Object Factory: - corrected slice and service methods --- src/common/tools/object_factory/Service.py | 2 +- src/common/tools/object_factory/Slice.py | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py index 3dcc83a84..5c0a60776 100644 --- a/src/common/tools/object_factory/Service.py +++ b/src/common/tools/object_factory/Service.py @@ -44,7 +44,7 @@ def json_service( def json_service_l2nm_planned( service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], - config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME ): return json_service( diff --git a/src/common/tools/object_factory/Slice.py b/src/common/tools/object_factory/Slice.py index 2376784e3..6ab666aa6 100644 --- a/src/common/tools/object_factory/Slice.py +++ b/src/common/tools/object_factory/Slice.py @@ -14,9 +14,7 @@ import copy from typing import Dict, List, Optional -from common.Constants import DEFAULT_CONTEXT_UUID from common.proto.context_pb2 import SliceStatusEnum -from common.tools.object_factory.Context import json_context_id def get_slice_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: return 'slc:{:s}/{:s}=={:s}/{:s}'.format( @@ -32,13 +30,13 @@ def json_slice_owner(owner_uuid : str, owner_string : str) -> Dict: return {'owner_uuid': {'uuid': owner_uuid}, 'owner_string': owner_string} def json_slice( - slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + slice_uuid : str, context_id : Optional[Dict] = None, status : SliceStatusEnum = SliceStatusEnum.SLICESTATUS_PLANNED, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], config_rules : List[Dict] = [], service_ids : List[Dict] = [], subslice_ids : List[Dict] = [], owner : Optional[Dict] = None): result = { - 'slice_id' : json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)), + 'slice_id' : json_slice_id(slice_uuid, context_id=context_id), 'slice_status' : {'slice_status': status}, 'slice_endpoint_ids': copy.deepcopy(endpoint_ids), 'slice_constraints' : copy.deepcopy(constraints), -- GitLab From f37d33579e6bb33ddea36d3faad2af5a5ed359ac Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 17:09:08 +0000 Subject: [PATCH 040/158] Context component: - corrected imports of Method Wrappers API --- scripts/run_tests_locally.sh | 2 +- src/context/service/database/Connection.py | 2 +- src/context/service/database/Context.py | 2 +- src/context/service/database/Device.py | 2 +- src/context/service/database/Link.py | 2 +- src/context/service/database/PolicyRule.py | 2 +- src/context/service/database/Service.py | 2 +- src/context/service/database/Slice.py | 2 +- src/context/service/database/Topology.py | 2 +- src/context/service/database/uuids/Connection.py | 2 +- src/context/service/database/uuids/Context.py | 2 +- src/context/service/database/uuids/Device.py | 2 +- src/context/service/database/uuids/EndPoint.py | 2 +- src/context/service/database/uuids/Link.py | 2 +- src/context/service/database/uuids/PolicuRule.py | 2 +- src/context/service/database/uuids/Service.py | 2 +- src/context/service/database/uuids/Slice.py | 2 +- src/context/service/database/uuids/Topology.py | 2 +- 18 files changed, 18 insertions(+), 18 deletions(-) diff --git a/scripts/run_tests_locally.sh b/scripts/run_tests_locally.sh index 1d48cc1af..486107994 100755 --- a/scripts/run_tests_locally.sh +++ b/scripts/run_tests_locally.sh @@ -54,7 +54,7 @@ rm -f $COVERAGEFILE coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ common/orm/tests/test_unitary.py \ common/message_broker/tests/test_unitary.py \ - common/rpc_method_wrapper/tests/test_unitary.py + common/method_wrappers/tests/test_unitary.py coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ context/tests/test_unitary.py diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py index 3ab0b83bf..42fc86ebf 100644 --- a/src/context/service/database/Connection.py +++ b/src/context/service/database/Connection.py @@ -20,7 +20,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import Connection, ConnectionId, ConnectionIdList, ConnectionList, ServiceId -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Connection import json_connection_id from .models.ConnectionModel import ConnectionEndPointModel, ConnectionModel, ConnectionSubServiceModel diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py index e136a4f83..6c7003e95 100644 --- a/src/context/service/database/Context.py +++ b/src/context/service/database/Context.py @@ -19,7 +19,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import Context, ContextId, ContextIdList, ContextList -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.object_factory.Context import json_context_id from .models.ContextModel import ContextModel from .uuids.Context import context_get_uuid diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index acb1603c6..ccd991d7f 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -18,7 +18,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import Device, DeviceId, DeviceIdList, DeviceList -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Device import json_device_id from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules from .models.DeviceModel import DeviceModel diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index a2b4e3035..c21dd6714 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -18,7 +18,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import Link, LinkId, LinkIdList, LinkList -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.object_factory.Link import json_link_id from .models.LinkModel import LinkModel, LinkEndPointModel from .models.TopologyModel import TopologyLinkModel diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py index da8356e04..2371af88e 100644 --- a/src/context/service/database/PolicyRule.py +++ b/src/context/service/database/PolicyRule.py @@ -19,7 +19,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple from common.proto.policy_pb2 import PolicyRule, PolicyRuleId, PolicyRuleIdList, PolicyRuleList -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.grpc.Tools import grpc_message_to_json from common.tools.object_factory.PolicyRule import json_policyrule_id from context.service.database.uuids.Device import device_get_uuid diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index c926c2540..247914d65 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -18,7 +18,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceIdList, ServiceList -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Service import json_service_id from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index 6566f94c5..e963fb772 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -19,7 +19,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple from common.proto.context_pb2 import ContextId, Slice, SliceId, SliceIdList, SliceList -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Slice import json_slice_id from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py index a7272713c..40ecb6c39 100644 --- a/src/context/service/database/Topology.py +++ b/src/context/service/database/Topology.py @@ -19,7 +19,7 @@ from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple from common.proto.context_pb2 import ContextId, Topology, TopologyId, TopologyIdList, TopologyList -from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Topology import json_topology_id from .models.TopologyModel import TopologyModel diff --git a/src/context/service/database/uuids/Connection.py b/src/context/service/database/uuids/Connection.py index 24c2e9977..eea3b7214 100644 --- a/src/context/service/database/uuids/Connection.py +++ b/src/context/service/database/uuids/Connection.py @@ -13,7 +13,7 @@ # limitations under the License. from common.proto.context_pb2 import ConnectionId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random def connection_get_uuid( diff --git a/src/context/service/database/uuids/Context.py b/src/context/service/database/uuids/Context.py index 753f80e9c..1b798123e 100644 --- a/src/context/service/database/uuids/Context.py +++ b/src/context/service/database/uuids/Context.py @@ -13,7 +13,7 @@ # limitations under the License. from common.proto.context_pb2 import ContextId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random def context_get_uuid( diff --git a/src/context/service/database/uuids/Device.py b/src/context/service/database/uuids/Device.py index c1b66759b..41391c8fa 100644 --- a/src/context/service/database/uuids/Device.py +++ b/src/context/service/database/uuids/Device.py @@ -13,7 +13,7 @@ # limitations under the License. from common.proto.context_pb2 import DeviceId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random def device_get_uuid( diff --git a/src/context/service/database/uuids/EndPoint.py b/src/context/service/database/uuids/EndPoint.py index 7afb87184..f257d1b41 100644 --- a/src/context/service/database/uuids/EndPoint.py +++ b/src/context/service/database/uuids/EndPoint.py @@ -14,7 +14,7 @@ from typing import Tuple from common.proto.context_pb2 import EndPointId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random from .Device import device_get_uuid from .Topology import topology_get_uuid diff --git a/src/context/service/database/uuids/Link.py b/src/context/service/database/uuids/Link.py index d1ae4c21f..2d68ed76f 100644 --- a/src/context/service/database/uuids/Link.py +++ b/src/context/service/database/uuids/Link.py @@ -13,7 +13,7 @@ # limitations under the License. from common.proto.context_pb2 import LinkId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random def link_get_uuid( diff --git a/src/context/service/database/uuids/PolicuRule.py b/src/context/service/database/uuids/PolicuRule.py index d5266ad11..dbe691a2d 100644 --- a/src/context/service/database/uuids/PolicuRule.py +++ b/src/context/service/database/uuids/PolicuRule.py @@ -13,7 +13,7 @@ # limitations under the License. from common.proto.policy_pb2 import PolicyRuleId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException +from common.method_wrappers.ServiceExceptions import InvalidArgumentException from ._Builder import get_uuid_from_string, get_uuid_random def policyrule_get_uuid( diff --git a/src/context/service/database/uuids/Service.py b/src/context/service/database/uuids/Service.py index 56a5d12a0..f3d205909 100644 --- a/src/context/service/database/uuids/Service.py +++ b/src/context/service/database/uuids/Service.py @@ -14,7 +14,7 @@ from typing import Tuple from common.proto.context_pb2 import ServiceId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random from .Context import context_get_uuid diff --git a/src/context/service/database/uuids/Slice.py b/src/context/service/database/uuids/Slice.py index 3b46e582e..b7d1465dd 100644 --- a/src/context/service/database/uuids/Slice.py +++ b/src/context/service/database/uuids/Slice.py @@ -14,7 +14,7 @@ from typing import Tuple from common.proto.context_pb2 import SliceId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random from .Context import context_get_uuid diff --git a/src/context/service/database/uuids/Topology.py b/src/context/service/database/uuids/Topology.py index c3c9175d8..e23f95238 100644 --- a/src/context/service/database/uuids/Topology.py +++ b/src/context/service/database/uuids/Topology.py @@ -14,7 +14,7 @@ from typing import Tuple from common.proto.context_pb2 import TopologyId -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentsException +from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random from .Context import context_get_uuid -- GitLab From ed1e9819dceacda35f8101634d070c13dc8149be Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 17:25:00 +0000 Subject: [PATCH 041/158] Context component: - corrected logger instantiation - arranged performance collector for unit tests - moved performance evaluation dump method to Metrics Pool --- src/common/method_wrappers/Decorator.py | 74 ++++++++++++++++++- .../service/ContextServiceServicerImpl.py | 2 +- src/context/service/__main__.py | 5 +- src/context/tests/conftest.py | 73 ++---------------- 4 files changed, 80 insertions(+), 74 deletions(-) diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py index 7ee2a919e..1a384d15a 100644 --- a/src/common/method_wrappers/Decorator.py +++ b/src/common/method_wrappers/Decorator.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import grpc, logging, threading +import grpc, json, logging, threading from enum import Enum -from typing import Dict, Tuple +from prettytable import PrettyTable +from typing import Any, Dict, List, Set, Tuple from prometheus_client import Counter, Histogram from prometheus_client.metrics import MetricWrapperBase, INF from common.tools.grpc.Tools import grpc_message_to_json_string @@ -83,6 +84,75 @@ class MetricsPool: return histogram_duration, counter_started, counter_completed, counter_failed + def get_pretty_table(self, remove_empty_buckets : bool = True) -> PrettyTable: + with MetricsPool.lock: + method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]] = dict() + bucket_bounds : Set[str] = set() + for raw_metric_name,raw_metric_data in MetricsPool.metrics.items(): + if '_COUNTER_' in raw_metric_name: + method_name,metric_name = raw_metric_name.split('_COUNTER_') + elif '_HISTOGRAM_' in raw_metric_name: + method_name,metric_name = raw_metric_name.split('_HISTOGRAM_') + else: + raise Exception('Unsupported metric: {:s}'.format(raw_metric_name)) # pragma: no cover + metric_data = method_to_metric_fields.setdefault(method_name, dict()).setdefault(metric_name, dict()) + for field_name,labels,value,_,_ in raw_metric_data._child_samples(): + if field_name == '_bucket': bucket_bounds.add(labels['le']) + if len(labels) > 0: field_name = '{:s}:{:s}'.format(field_name, json.dumps(labels, sort_keys=True)) + metric_data[field_name] = value + print('method_to_metric_fields', method_to_metric_fields) + + def sort_stats_key(item : List) -> float: + str_duration = str(item[0]) + if str_duration == '---': return 0.0 + return float(str_duration.replace(' ms', '')) + + field_names = ['Method', 'TOT', 'OK', 'ERR', 'avg(Dur)'] + bucket_bounds = sorted(bucket_bounds, key=float) # convert buckets to float to get the key + bucket_column_names = ['<={:s}'.format(bucket_bound) for bucket_bound in bucket_bounds] + field_names.extend(bucket_column_names) + + pt_stats = PrettyTable( + field_names=field_names, sortby='avg(Dur)', sort_key=sort_stats_key, reversesort=True) + for f in field_names: pt_stats.align[f] = 'r' + for f in ['Method']: pt_stats.align[f] = 'l' + + for method_name,metrics in method_to_metric_fields.items(): + counter_started_value = int(metrics['STARTED']['_total']) + if counter_started_value == 0: + #pt_stats.add_row([method_name, '---', '---', '---', '---']) + continue + counter_completed_value = int(metrics['COMPLETED']['_total']) + counter_failed_value = int(metrics['FAILED']['_total']) + duration_count_value = float(metrics['DURATION']['_count']) + duration_sum_value = float(metrics['DURATION']['_sum']) + duration_avg_value = duration_sum_value/duration_count_value + + row = [ + method_name, str(counter_started_value), str(counter_completed_value), str(counter_failed_value), + '{:.3f} ms'.format(1000.0 * duration_avg_value), + ] + + total_count = 0 + for bucket_bound in bucket_bounds: + labels = json.dumps({"le": bucket_bound}, sort_keys=True) + bucket_name = '_bucket:{:s}'.format(labels) + accumulated_count = int(metrics['DURATION'][bucket_name]) + bucket_count = accumulated_count - total_count + row.append(str(bucket_count) if bucket_count > 0 else '') + total_count = accumulated_count + + pt_stats.add_row(row) + + if remove_empty_buckets: + for bucket_column_name in bucket_column_names: + col_index = pt_stats._field_names.index(bucket_column_name) + num_non_empties = sum([1 for row in pt_stats._rows if len(row[col_index]) > 0]) + if num_non_empties > 0: continue + pt_stats.del_column(bucket_column_name) + + return pt_stats + def metered_subclass_method(metrics_pool : MetricsPool): def outer_wrapper(func): metrics = metrics_pool.get_metrics(func.__name__) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 7e7226570..3f1bd9c20 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -53,7 +53,7 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer self.messagebroker = messagebroker LOGGER.debug('Servicer Created') - def _get_metrics(self): return METRICS_POOL + def _get_metrics(self) -> MetricsPool: return METRICS_POOL # ----- Context ---------------------------------------------------------------------------------------------------- diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 9960e94b5..145c91cf0 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -25,11 +25,10 @@ LOG_LEVEL = get_log_level() logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) -LOGGER.addHandler(logging.StreamHandler(stream=sys.stderr)) -LOGGER.setLevel(logging.WARNING) +#LOGGER.addHandler(logging.StreamHandler(stream=sys.stderr)) +#LOGGER.setLevel(logging.WARNING) terminate = threading.Event() -LOGGER : logging.Logger = None def signal_handler(signal, frame): # pylint: disable=redefined-outer-name LOGGER.warning('Terminate signal received') diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py index dc54c8cdc..25de05842 100644 --- a/src/context/tests/conftest.py +++ b/src/context/tests/conftest.py @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, os, pytest, sqlalchemy +import os, pytest, sqlalchemy from _pytest.config import Config from _pytest.terminal import TerminalReporter -from prettytable import PrettyTable -from typing import Any, Dict, List, Set, Tuple +from typing import Tuple from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, get_service_port_grpc, get_service_port_http) from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum from common.message_broker.MessageBroker import MessageBroker +from common.method_wrappers.Decorator import MetricsPool from context.client.ContextClient import ContextClient from context.service.ContextService import ContextService from context.service.database.Engine import Engine @@ -47,7 +47,7 @@ def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]: yield _db_engine, _msg_broker _msg_broker.terminate() -RAW_METRICS = dict() +RAW_METRICS : MetricsPool = None @pytest.fixture(scope='session') def context_service( @@ -72,69 +72,6 @@ def pytest_terminal_summary( ): yield - method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]] = dict() - bucket_bounds : Set[str] = set() - for raw_metric_name,raw_metric_data in RAW_METRICS.items(): - if '_COUNTER_' in raw_metric_name: - method_name,metric_name = raw_metric_name.split('_COUNTER_') - elif '_HISTOGRAM_' in raw_metric_name: - method_name,metric_name = raw_metric_name.split('_HISTOGRAM_') - else: - raise Exception('Unsupported metric: {:s}'.format(raw_metric_name)) # pragma: no cover - metric_data = method_to_metric_fields.setdefault(method_name, dict()).setdefault(metric_name, dict()) - for field_name,labels,value,_,_ in raw_metric_data._child_samples(): - if field_name == '_bucket': bucket_bounds.add(labels['le']) - if len(labels) > 0: field_name = '{:s}:{:s}'.format(field_name, json.dumps(labels, sort_keys=True)) - metric_data[field_name] = value - #print('method_to_metric_fields', method_to_metric_fields) - - def sort_stats_key(item : List) -> float: - str_duration = str(item[0]) - if str_duration == '---': return 0.0 - return float(str_duration.replace(' ms', '')) - - field_names = ['Method', 'TOT', 'OK', 'ERR', 'avg(Dur)'] - bucket_bounds = sorted(bucket_bounds, key=float) # convert buckets to float to get the key - bucket_column_names = ['<={:s}'.format(bucket_bound) for bucket_bound in bucket_bounds] - field_names.extend(bucket_column_names) - - pt_stats = PrettyTable(field_names=field_names, sortby='avg(Dur)', sort_key=sort_stats_key, reversesort=True) - for f in field_names: pt_stats.align[f] = 'r' - for f in ['Method']: pt_stats.align[f] = 'l' - - for method_name,metrics in method_to_metric_fields.items(): - counter_started_value = int(metrics['STARTED']['_total']) - if counter_started_value == 0: - #pt_stats.add_row([method_name, '---', '---', '---', '---']) - continue - counter_completed_value = int(metrics['COMPLETED']['_total']) - counter_failed_value = int(metrics['FAILED']['_total']) - duration_count_value = float(metrics['DURATION']['_count']) - duration_sum_value = float(metrics['DURATION']['_sum']) - duration_avg_value = duration_sum_value/duration_count_value - - row = [ - method_name, str(counter_started_value), str(counter_completed_value), str(counter_failed_value), - '{:.3f} ms'.format(1000.0 * duration_avg_value), - ] - - total_count = 0 - for bucket_bound in bucket_bounds: - labels = json.dumps({"le": bucket_bound}, sort_keys=True) - bucket_name = '_bucket:{:s}'.format(labels) - accumulated_count = int(metrics['DURATION'][bucket_name]) - bucket_count = accumulated_count - total_count - row.append(str(bucket_count) if bucket_count > 0 else '') - total_count = accumulated_count - - pt_stats.add_row(row) - - for bucket_column_name in bucket_column_names: - col_index = pt_stats._field_names.index(bucket_column_name) - num_non_empties = sum([1 for row in pt_stats._rows if len(row[col_index]) > 0]) - if num_non_empties > 0: continue - pt_stats.del_column(bucket_column_name) - print('') print('Performance Results:') - print(pt_stats.get_string()) + print(RAW_METRICS.get_pretty_table().get_string()) -- GitLab From 359705e33bc01257eb0f36417ea269273f199e50 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 12 Jan 2023 17:33:44 +0000 Subject: [PATCH 042/158] Common - Method Wrappers: - corrected metrics names --- src/common/method_wrappers/Decorator.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py index 1a384d15a..f918b8458 100644 --- a/src/common/method_wrappers/Decorator.py +++ b/src/common/method_wrappers/Decorator.py @@ -100,7 +100,7 @@ class MetricsPool: if field_name == '_bucket': bucket_bounds.add(labels['le']) if len(labels) > 0: field_name = '{:s}:{:s}'.format(field_name, json.dumps(labels, sort_keys=True)) metric_data[field_name] = value - print('method_to_metric_fields', method_to_metric_fields) + #print('method_to_metric_fields', method_to_metric_fields) def sort_stats_key(item : List) -> float: str_duration = str(item[0]) @@ -118,12 +118,12 @@ class MetricsPool: for f in ['Method']: pt_stats.align[f] = 'l' for method_name,metrics in method_to_metric_fields.items(): - counter_started_value = int(metrics['STARTED']['_total']) + counter_started_value = int(metrics['REQUESTS_STARTED']['_total']) if counter_started_value == 0: #pt_stats.add_row([method_name, '---', '---', '---', '---']) continue - counter_completed_value = int(metrics['COMPLETED']['_total']) - counter_failed_value = int(metrics['FAILED']['_total']) + counter_completed_value = int(metrics['REQUESTS_COMPLETED']['_total']) + counter_failed_value = int(metrics['REQUESTS_FAILED']['_total']) duration_count_value = float(metrics['DURATION']['_count']) duration_sum_value = float(metrics['DURATION']['_sum']) duration_avg_value = duration_sum_value/duration_count_value -- GitLab From 692fc03ee9658b51f57f3c8004232101cbc7f18a Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 15:29:44 +0000 Subject: [PATCH 043/158] Common: - Added backend for NATS message broker - removed unneeded test script --- src/common/message_broker/Factory.py | 2 + .../message_broker/backend/BackendEnum.py | 3 +- .../backend/nats/NatsBackend.py | 49 +++++++++++++++ .../backend/nats/NatsBackendThread.py | 61 +++++++++++++++++++ .../message_broker/backend/nats/__init__.py | 14 +++++ test-context.sh | 58 ------------------ 6 files changed, 128 insertions(+), 59 deletions(-) create mode 100644 src/common/message_broker/backend/nats/NatsBackend.py create mode 100644 src/common/message_broker/backend/nats/NatsBackendThread.py create mode 100644 src/common/message_broker/backend/nats/__init__.py delete mode 100755 test-context.sh diff --git a/src/common/message_broker/Factory.py b/src/common/message_broker/Factory.py index c5d48f9e1..e60118706 100644 --- a/src/common/message_broker/Factory.py +++ b/src/common/message_broker/Factory.py @@ -17,12 +17,14 @@ from typing import Optional, Union from .backend._Backend import _Backend from .backend.BackendEnum import BackendEnum from .backend.inmemory.InMemoryBackend import InMemoryBackend +from .backend.nats.NatsBackend import NatsBackend #from .backend.redis.RedisBackend import RedisBackend LOGGER = logging.getLogger(__name__) BACKENDS = { BackendEnum.INMEMORY.value: InMemoryBackend, + BackendEnum.NATS.value: NatsBackend, #BackendEnum.REDIS.value: RedisBackend, #BackendEnum.KAFKA.value: KafkaBackend, #BackendEnum.RABBITMQ.value: RabbitMQBackend, diff --git a/src/common/message_broker/backend/BackendEnum.py b/src/common/message_broker/backend/BackendEnum.py index bf95f1764..05dde8197 100644 --- a/src/common/message_broker/backend/BackendEnum.py +++ b/src/common/message_broker/backend/BackendEnum.py @@ -16,7 +16,8 @@ from enum import Enum class BackendEnum(Enum): INMEMORY = 'inmemory' - REDIS = 'redis' + NATS = 'nats' + #REDIS = 'redis' #KAFKA = 'kafka' #RABBITMQ = 'rabbitmq' #ZEROMQ = 'zeromq' diff --git a/src/common/message_broker/backend/nats/NatsBackend.py b/src/common/message_broker/backend/nats/NatsBackend.py new file mode 100644 index 000000000..0825095eb --- /dev/null +++ b/src/common/message_broker/backend/nats/NatsBackend.py @@ -0,0 +1,49 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import queue, threading +from typing import Iterator, Set, Tuple +from common.Settings import get_setting +from common.message_broker.Message import Message +from .._Backend import _Backend +from .NatsBackendThread import NatsBackendThread + +DEFAULT_NATS_URI = 'nats://127.0.0.1:4222' + +class NatsBackend(_Backend): + def __init__(self, **settings) -> None: # pylint: disable=super-init-not-called + nats_uri = get_setting('NATS_URI', settings=settings, default=DEFAULT_NATS_URI) + self._terminate = threading.Event() + self._nats_backend_thread = NatsBackendThread(nats_uri) + self._nats_backend_thread.start() + + def terminate(self) -> None: + self._terminate.set() + self._nats_backend_thread.terminate() + self._nats_backend_thread.join() + + def publish(self, topic_name : str, message_content : str) -> None: + self._nats_backend_thread.publish(topic_name, message_content) + + def consume(self, topic_names : Set[str], consume_timeout : float) -> Iterator[Tuple[str, str]]: + out_queue = queue.Queue[Message]() + unsubscribe = threading.Event() + for topic_name in topic_names: + self._nats_backend_thread.subscribe(topic_name, consume_timeout, out_queue, unsubscribe) + while not self._terminate.is_set(): + try: + yield out_queue.get(block=True, timeout=consume_timeout) + except queue.Empty: + continue + unsubscribe.set() diff --git a/src/common/message_broker/backend/nats/NatsBackendThread.py b/src/common/message_broker/backend/nats/NatsBackendThread.py new file mode 100644 index 000000000..e11ab7c04 --- /dev/null +++ b/src/common/message_broker/backend/nats/NatsBackendThread.py @@ -0,0 +1,61 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio, nats, nats.errors, queue, threading +from common.message_broker.Message import Message + +class NatsBackendThread(threading.Thread): + def __init__(self, nats_uri : str) -> None: + self._nats_uri = nats_uri + self._event_loop = asyncio.get_event_loop() + self._terminate = asyncio.Event() + self._publish_queue = asyncio.Queue[Message]() + super().__init__() + + def terminate(self) -> None: + self._terminate.set() + + async def _run_publisher(self) -> None: + client = await nats.connect(servers=[self._nats_uri]) + while not self._terminate.is_set(): + message : Message = await self._publish_queue.get() + await client.publish(message.topic, message.content.encode('UTF-8')) + await client.drain() + + def publish(self, topic_name : str, message_content : str) -> None: + self._publish_queue.put_nowait(Message(topic_name, message_content)) + + async def _run_subscriber( + self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event + ) -> None: + client = await nats.connect(servers=[self._nats_uri]) + subscription = await client.subscribe(topic_name) + while not self._terminate.is_set() and not unsubscribe.is_set(): + try: + message = await subscription.next_msg(timeout) + except nats.errors.TimeoutError: + continue + out_queue.put(Message(message.subject, message.data.decode('UTF-8'))) + await subscription.unsubscribe() + await client.drain() + + def subscribe( + self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event + ) -> None: + self._event_loop.create_task(self._run_subscriber(topic_name, timeout, out_queue, unsubscribe)) + + def run(self) -> None: + asyncio.set_event_loop(self._event_loop) + self._event_loop.create_task(self._run_publisher()) + self._event_loop.run_until_complete(self._terminate.wait()) diff --git a/src/common/message_broker/backend/nats/__init__.py b/src/common/message_broker/backend/nats/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/common/message_broker/backend/nats/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/test-context.sh b/test-context.sh deleted file mode 100755 index 212ce5bbe..000000000 --- a/test-context.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -######################################################################################################################## -# Define your deployment settings here -######################################################################################################################## - -# If not already set, set the name of the Kubernetes namespace to deploy to. -export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} - -######################################################################################################################## -# Automated steps start here -######################################################################################################################## - -PROJECTDIR=`pwd` - -cd $PROJECTDIR/src -RCFILE=$PROJECTDIR/coverage/.coveragerc -COVERAGEFILE=$PROJECTDIR/coverage/.coverage - -# Destroy old coverage file and configure the correct folder on the .coveragerc file -rm -f $COVERAGEFILE -cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/tfs-ctrl+$PROJECTDIR+g > $RCFILE - -#export CRDB_URI="cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_test?sslmode=require" -export CRDB_URI="cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs_test?sslmode=require" -export PYTHONPATH=/home/tfs/tfs-ctrl/src - -# Run unitary tests and analyze coverage of code at same time -# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ - context/tests/test_hasher.py \ - context/tests/test_context.py \ - context/tests/test_topology.py \ - context/tests/test_device.py \ - context/tests/test_link.py \ - context/tests/test_service.py \ - context/tests/test_slice.py \ - context/tests/test_connection.py \ - context/tests/test_policy.py - -echo -echo "Coverage report:" -echo "----------------" -#coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered | grep --color -E -i "^context/.*$|$" -coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered --include="context/*" -- GitLab From dcd19e785d2aa8aa78ab9250aaf87b1342a18876 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 15:47:43 +0000 Subject: [PATCH 044/158] Context: - updated run_tests_locally script - updated GitLab CI/CD with NATS - updated ContextModel and TopologyModel with created/updated - added logic for Context and Topology events created/updated/deleted - activated dependencies for test_connection - activated event testing in Context and Topology entities - corrected conftest for Context component --- scripts/run_tests_locally-context.sh | 72 ++++++++++++++----- src/context/.gitlab-ci.yml | 15 +++- src/context/service/Constants.py | 30 -------- .../service/ContextServiceServicerImpl.py | 48 +++++++------ src/context/service/Events.py | 20 +++++- src/context/service/database/Context.py | 44 +++++++----- src/context/service/database/Topology.py | 46 ++++++------ .../service/database/models/ContextModel.py | 4 +- .../service/database/models/TopologyModel.py | 4 +- src/context/tests/conftest.py | 11 +-- src/context/tests/test_connection.py | 2 +- src/context/tests/test_context.py | 45 ++++++------ src/context/tests/test_topology.py | 66 ++++++++--------- 13 files changed, 230 insertions(+), 177 deletions(-) delete mode 100644 src/context/service/Constants.py diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index 8b0c82b3e..0124469ec 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -13,28 +13,66 @@ # See the License for the specific language governing permissions and # limitations under the License. -######################################################################################################################## -# Define your deployment settings here -######################################################################################################################## - -# If not already set, set the name of the Kubernetes namespace to deploy to. -export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} - -######################################################################################################################## -# Automated steps start here -######################################################################################################################## - PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc +COVERAGEFILE=$PROJECTDIR/coverage/.coverage -#export CRDB_URI="cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_test?sslmode=require" -export CRDB_URI="cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs_test?sslmode=require" -export PYTHONPATH=/home/tfs/tfs-ctrl/src +# Destroy old coverage file and configure the correct folder on the .coveragerc file +rm -f $COVERAGEFILE +cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/tfs-ctrl+$PROJECTDIR+g > $RCFILE + +echo +echo "Pre-test clean-up:" +echo "------------------" +docker rm -f crdb nats +docker volume rm -f crdb +docker network rm tfs-br -# Run unitary tests and analyze coverage of code at same time +echo +echo "Pull Docker images:" +echo "-------------------" +docker pull cockroachdb/cockroach:latest-v22.2 +docker pull nats:2.9 + +echo +echo "Create test environment:" +echo "------------------------" +docker network create -d bridge --subnet=172.254.254.0/24 --gateway=172.254.254.1 --ip-range=172.254.254.0/24 tfs-br +docker volume create crdb +docker run --name crdb -d --network=tfs-br --ip 172.254.254.10 -p 26257:26257 -p 8080:8080 \ + --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123\ + --volume "crdb:/cockroach/cockroach-data" \ + cockroachdb/cockroach:latest-v22.2 start-single-node +docker run --name nats -d --network=tfs-br --ip 172.254.254.11 -p 4222:4222 -p 8222:8222 \ + nats:2.9 --http_port 8222 --user tfs --pass tfs123 +echo "Waiting for initialization..." +sleep 10 +docker ps -a + +echo +echo "Run unitary tests and analyze code coverage:" +echo "--------------------------------------------" +export CRDB_URI="cockroachdb://tfs:tfs123@172.254.254.10:26257/tfs_test?sslmode=require" +export MB_BACKEND="nats" +export NATS_URI="nats://tfs:tfs123@172.254.254.11:4222" +export PYTHONPATH=/home/tfs/tfs-ctrl/src # helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ - context/tests/test_unitary.py \ - context/tests/test_hasher.py + context/tests/test_context.py \ + context/tests/test_topology.py + #context/tests/test_*.py + +echo +echo "Coverage report:" +echo "----------------" +#coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered | grep --color -E -i "^context/.*$|$" +coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered --include="context/*" + +echo +echo "Post-test clean-up:" +echo "-------------------" +docker rm -f crdb nats +docker volume rm -f crdb +docker network rm tfs-br diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 468566701..2a707004f 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -51,10 +51,12 @@ unit test context: - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi + - if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi script: - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - docker pull "cockroachdb/cockroach:latest-v22.2" + - docker pull "nats:2.9" - docker volume create crdb - > docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 @@ -63,16 +65,24 @@ unit test context: --env COCKROACH_PASSWORD=tfs123 --volume "crdb:/cockroach/cockroach-data" cockroachdb/cockroach:latest-v22.2 start-single-node + - > + docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 + nats:2.9 --http_port 8222 --user tfs --pass tfs123 + - echo "Waiting for initialization..." - sleep 10 + - docker ps -a - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") - > docker run --name $IMAGE_NAME -d -p 1010:1010 --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" + --env "MB_BACKEND=nats" + --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222" --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - docker ps -a - - sleep 10 + - sleep 3 - docker logs $IMAGE_NAME - > docker exec -i $IMAGE_NAME bash -c @@ -80,8 +90,7 @@ unit test context: - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: - - docker rm -f $IMAGE_NAME - - docker rm -f crdb + - docker rm -f $IMAGE_NAME crdb nats - docker volume rm -f crdb - docker network rm teraflowbridge rules: diff --git a/src/context/service/Constants.py b/src/context/service/Constants.py deleted file mode 100644 index 1eb274cf0..000000000 --- a/src/context/service/Constants.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -TOPIC_CONNECTION = 'connection' -TOPIC_CONTEXT = 'context' -TOPIC_DEVICE = 'device' -TOPIC_LINK = 'link' -#TOPIC_POLICY = 'policy' -TOPIC_SERVICE = 'service' -TOPIC_SLICE = 'slice' -TOPIC_TOPOLOGY = 'topology' - -TOPICS = { - TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, - #TOPIC_POLICY, - TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY -} - -CONSUME_TIMEOUT = 0.5 # seconds diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 3f1bd9c20..1528d64d9 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -19,7 +19,7 @@ from common.proto.context_pb2 import ( Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, Context, ContextEvent, ContextId, ContextIdList, ContextList, Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList, - Empty, + Empty, EventTypeEnum, Link, LinkEvent, LinkId, LinkIdList, LinkList, Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList, Slice, SliceEvent, SliceId, SliceIdList, SliceList, @@ -38,9 +38,9 @@ from .database.PolicyRule import ( from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset from .database.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set -from .Constants import ( +from .Events import ( CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, #TOPIC_POLICY, - TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY) + TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY, notify_event) LOGGER = logging.getLogger(__name__) @@ -60,28 +60,29 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListContextIds(self, request : Empty, context : grpc.ServicerContext) -> ContextIdList: - return context_list_ids(self.db_engine) + return ContextIdList(context_ids=context_list_ids(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListContexts(self, request : Empty, context : grpc.ServicerContext) -> ContextList: - return context_list_objs(self.db_engine) + return ContextList(contexts=context_list_objs(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetContext(self, request : ContextId, context : grpc.ServicerContext) -> Context: - return context_get(self.db_engine, request) + return Context(**context_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId: - context_id,updated = context_set(self.db_engine, request) # pylint: disable=unused-variable - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id}) - return context_id + context_id,updated = context_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id}) + return ContextId(**context_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty: - deleted = context_delete(self.db_engine, request) # pylint: disable=unused-variable - #if deleted: - # notify_event(self.messagebroker, TOPIC_CONTEXT, EventTypeEnum.EVENTTYPE_REMOVE, {'context_id': request}) + context_id,deleted = context_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id}) return Empty() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) @@ -94,28 +95,29 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListTopologyIds(self, request : ContextId, context : grpc.ServicerContext) -> TopologyIdList: - return topology_list_ids(self.db_engine, request) + return TopologyIdList(topology_ids=topology_list_ids(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListTopologies(self, request : ContextId, context : grpc.ServicerContext) -> TopologyList: - return topology_list_objs(self.db_engine, request) + return TopologyList(topologies=topology_list_objs(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Topology: - return topology_get(self.db_engine, request) + return Topology(**topology_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId: - topology_id,updated = topology_set(self.db_engine, request) # pylint: disable=unused-variable - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id}) - return topology_id + topology_id,updated = topology_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id}) + return TopologyId(**topology_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: - deleted = topology_delete(self.db_engine, request) # pylint: disable=unused-variable - #if deleted: - # notify_event(self.messagebroker, TOPIC_TOPOLOGY, EventTypeEnum.EVENTTYPE_REMOVE, {'topology_id': request}) + topology_id,deleted = topology_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id}) return Empty() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) diff --git a/src/context/service/Events.py b/src/context/service/Events.py index 46b1d36c4..e7cf1997c 100644 --- a/src/context/service/Events.py +++ b/src/context/service/Events.py @@ -18,9 +18,25 @@ from common.message_broker.Message import Message from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2 import EventTypeEnum -def notify_event( - messagebroker : MessageBroker, topic_name : str, event_type : EventTypeEnum, fields : Dict[str, str]) -> None: +TOPIC_CONNECTION = 'connection' +TOPIC_CONTEXT = 'context' +TOPIC_DEVICE = 'device' +TOPIC_LINK = 'link' +#TOPIC_POLICY = 'policy' +TOPIC_SERVICE = 'service' +TOPIC_SLICE = 'slice' +TOPIC_TOPOLOGY = 'topology' + +TOPICS = { + TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, #TOPIC_POLICY, + TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY +} +CONSUME_TIMEOUT = 0.5 # seconds + +def notify_event( + messagebroker : MessageBroker, topic_name : str, event_type : EventTypeEnum, fields : Dict[str, str] +) -> None: event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}} for field_name, field_value in fields.items(): event[field_name] = field_value diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py index 6c7003e95..e4fd13b22 100644 --- a/src/context/service/database/Context.py +++ b/src/context/service/database/Context.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import datetime, logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple -from common.proto.context_pb2 import Context, ContextId, ContextIdList, ContextList +from common.proto.context_pb2 import Context, ContextId from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.object_factory.Context import json_context_id from .models.ContextModel import ContextModel @@ -26,21 +26,19 @@ from .uuids.Context import context_get_uuid LOGGER = logging.getLogger(__name__) -def context_list_ids(db_engine : Engine) -> ContextIdList: +def context_list_ids(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[ContextModel] = session.query(ContextModel).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return ContextIdList(context_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def context_list_objs(db_engine : Engine) -> ContextList: +def context_list_objs(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[ContextModel] = session.query(ContextModel).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return ContextList(contexts=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def context_get(db_engine : Engine, request : ContextId) -> Context: +def context_get(db_engine : Engine, request : ContextId) -> Dict: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[ContextModel] = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none() @@ -51,9 +49,9 @@ def context_get(db_engine : Engine, request : ContextId) -> Context: raise NotFoundException('Context', raw_context_uuid, extra_details=[ 'context_uuid generated was: {:s}'.format(context_uuid) ]) - return Context(**obj) + return obj -def context_set(db_engine : Engine, request : Context) -> Tuple[ContextId, bool]: +def context_set(db_engine : Engine, request : Context) -> Tuple[Dict, bool]: context_name = request.name if len(context_name) == 0: context_name = request.context_id.context_uuid.uuid context_uuid = context_get_uuid(request.context_id, context_name=context_name, allow_random=True) @@ -72,26 +70,34 @@ def context_set(db_engine : Engine, request : Context) -> Tuple[ContextId, bool] if len(request.slice_ids) > 0: # pragma: no cover LOGGER.warning('Items in field "slice_ids" ignored. This field is used for retrieval purposes only.') + now = datetime.datetime.utcnow() context_data = [{ 'context_uuid': context_uuid, 'context_name': context_name, + 'created_at' : now, + 'updated_at' : now, }] - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(ContextModel).values(context_data) stmt = stmt.on_conflict_do_update( index_elements=[ContextModel.context_uuid], - set_=dict(context_name = stmt.excluded.context_name) + set_=dict( + context_name = stmt.excluded.context_name, + updated_at = stmt.excluded.updated_at, + ) ) - session.execute(stmt) + stmt = stmt.returning(ContextModel.created_at, ContextModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + return updated_at > created_at - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated - return ContextId(**json_context_id(context_uuid)),updated + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return json_context_id(context_uuid),updated -def context_delete(db_engine : Engine, request : ContextId) -> bool: +def context_delete(db_engine : Engine, request : ContextId) -> Tuple[Dict, bool]: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return json_context_id(context_uuid),deleted diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py index 40ecb6c39..75fc229d8 100644 --- a/src/context/service/database/Topology.py +++ b/src/context/service/database/Topology.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import datetime, logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple -from common.proto.context_pb2 import ContextId, Topology, TopologyId, TopologyIdList, TopologyList +from common.proto.context_pb2 import ContextId, Topology, TopologyId from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Topology import json_topology_id @@ -28,23 +28,21 @@ from .uuids.Topology import topology_get_uuid LOGGER = logging.getLogger(__name__) -def topology_list_ids(db_engine : Engine, request : ContextId) -> TopologyIdList: +def topology_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return TopologyIdList(topology_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def topology_list_objs(db_engine : Engine, request : ContextId) -> TopologyList: +def topology_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return TopologyList(topologies=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def topology_get(db_engine : Engine, request : TopologyId) -> Topology: +def topology_get(db_engine : Engine, request : TopologyId) -> Dict: _,topology_uuid = topology_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[TopologyModel] = session.query(TopologyModel)\ @@ -58,9 +56,9 @@ def topology_get(db_engine : Engine, request : TopologyId) -> Topology: 'context_uuid generated was: {:s}'.format(context_uuid), 'topology_uuid generated was: {:s}'.format(topology_uuid), ]) - return Topology(**obj) + return obj -def topology_set(db_engine : Engine, request : Topology) -> Tuple[TopologyId, bool]: +def topology_set(db_engine : Engine, request : Topology) -> Tuple[Dict, bool]: topology_name = request.name if len(topology_name) == 0: topology_name = request.topology_id.topology_uuid.uuid context_uuid,topology_uuid = topology_get_uuid(request.topology_id, topology_name=topology_name, allow_random=True) @@ -75,27 +73,35 @@ def topology_set(db_engine : Engine, request : Topology) -> Tuple[TopologyId, bo if len(request.link_ids) > 0: # pragma: no cover LOGGER.warning('Items in field "link_ids" ignored. This field is used for retrieval purposes only.') + now = datetime.datetime.utcnow() topology_data = [{ 'context_uuid' : context_uuid, 'topology_uuid': topology_uuid, 'topology_name': topology_name, + 'created_at' : now, + 'updated_at' : now, }] def callback(session : Session) -> None: stmt = insert(TopologyModel).values(topology_data) stmt = stmt.on_conflict_do_update( index_elements=[TopologyModel.topology_uuid], - set_=dict(topology_name = stmt.excluded.topology_name) + set_=dict( + topology_name = stmt.excluded.topology_name, + updated_at = stmt.excluded.updated_at, + ) ) - session.execute(stmt) - - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated - return TopologyId(**json_topology_id(topology_uuid, json_context_id(context_uuid))),updated + stmt = stmt.returning(TopologyModel.created_at, TopologyModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + return updated_at > created_at + + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)),updated -def topology_delete(db_engine : Engine, request : TopologyId) -> bool: - _,topology_uuid = topology_get_uuid(request, allow_random=False) +def topology_delete(db_engine : Engine, request : TopologyId) -> Tuple[Dict, bool]: + context_uuid,topology_uuid = topology_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)),deleted diff --git a/src/context/service/database/models/ContextModel.py b/src/context/service/database/models/ContextModel.py index 8dc5f545f..fee0f72a5 100644 --- a/src/context/service/database/models/ContextModel.py +++ b/src/context/service/database/models/ContextModel.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from sqlalchemy import Column, String +from sqlalchemy import Column, DateTime, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict @@ -23,6 +23,8 @@ class ContextModel(_Base): context_uuid = Column(UUID(as_uuid=False), primary_key=True) context_name = Column(String, nullable=False) + created_at = Column(DateTime) + updated_at = Column(DateTime) topologies = relationship('TopologyModel', back_populates='context') services = relationship('ServiceModel', back_populates='context') diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index 14fdaaeec..d4dbe173e 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from sqlalchemy import Column, ForeignKey, String +from sqlalchemy import Column, DateTime, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict @@ -24,6 +24,8 @@ class TopologyModel(_Base): topology_uuid = Column(UUID(as_uuid=False), primary_key=True) context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) topology_name = Column(String, nullable=False) + created_at = Column(DateTime) + updated_at = Column(DateTime) context = relationship('ContextModel', back_populates='topologies') topology_devices = relationship('TopologyDeviceModel') # back_populates='topology' diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py index 25de05842..93b8c66be 100644 --- a/src/context/tests/conftest.py +++ b/src/context/tests/conftest.py @@ -20,7 +20,7 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, get_service_port_grpc, get_service_port_http) -from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum +from common.message_broker.Factory import get_messagebroker_backend from common.message_broker.MessageBroker import MessageBroker from common.method_wrappers.Decorator import MetricsPool from context.client.ContextClient import ContextClient @@ -43,7 +43,7 @@ def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]: Engine.create_database(_db_engine) rebuild_database(_db_engine) - _msg_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) + _msg_broker = MessageBroker(get_messagebroker_backend()) yield _db_engine, _msg_broker _msg_broker.terminate() @@ -72,6 +72,7 @@ def pytest_terminal_summary( ): yield - print('') - print('Performance Results:') - print(RAW_METRICS.get_pretty_table().get_string()) + if RAW_METRICS is not None: + print('') + print('Performance Results:') + print(RAW_METRICS.get_pretty_table().get_string()) diff --git a/src/context/tests/test_connection.py b/src/context/tests/test_connection.py index f28fde356..4cc5407b4 100644 --- a/src/context/tests/test_connection.py +++ b/src/context/tests/test_connection.py @@ -24,7 +24,7 @@ from .Objects import ( DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R3, SERVICE_R1_R3_ID, SERVICE_R2_R3, SERVICE_R2_R3_ID, TOPOLOGY, TOPOLOGY_ID) -#@pytest.mark.depends(on=['context/tests/test_service.py::test_service', 'context/tests/test_slice.py::test_slice']) +@pytest.mark.depends(on=['context/tests/test_service.py::test_service', 'context/tests/test_slice.py::test_slice']) def test_connection(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- diff --git a/src/context/tests/test_context.py b/src/context/tests/test_context.py index 443d36c92..4337db239 100644 --- a/src/context/tests/test_context.py +++ b/src/context/tests/test_context.py @@ -12,22 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest -from common.proto.context_pb2 import Context, ContextId, Empty +import copy, grpc, pytest, time +from common.proto.context_pb2 import Context, ContextEvent, ContextId, Empty, EventTypeEnum from context.client.ContextClient import ContextClient from context.service.database.uuids.Context import context_get_uuid -#from context.client.EventsCollector import EventsCollector +from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME def test_context(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client, log_events_received=True, - # activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() + events_collector = EventsCollector( + context_client, log_events_received=True, + activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, + activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + activate_connection_collector = False) + events_collector.start() + time.sleep(3) # wait for the events collector to start # ----- Get when the object does not exist ------------------------------------------------------------------------- context_id = ContextId(**CONTEXT_ID) @@ -50,10 +51,10 @@ def test_context(context_client : ContextClient) -> None: assert response.context_uuid.uuid == context_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True, timeout=10.0) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.context_id.context_uuid.uuid == context_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ContextEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -84,10 +85,10 @@ def test_context(context_client : ContextClient) -> None: assert response.context_uuid.uuid == context_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True, timeout=10.0) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.context_id.context_uuid.uuid == context_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ContextEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -114,10 +115,10 @@ def test_context(context_client : ContextClient) -> None: context_client.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True, timeout=10.0) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.context_id.context_uuid.uuid == context_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ContextEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.context_id.context_uuid.uuid == context_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListContextIds(Empty()) @@ -127,4 +128,4 @@ def test_context(context_client : ContextClient) -> None: assert len(response.contexts) == 0 # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + events_collector.stop() diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 23e73edc8..2e7e38cb1 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -12,31 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest -from common.proto.context_pb2 import Context, ContextId, Topology, TopologyId +import copy, grpc, pytest, time +from common.proto.context_pb2 import Context, ContextEvent, ContextId, EventTypeEnum, Topology, TopologyEvent, TopologyId from context.client.ContextClient import ContextClient from context.service.database.uuids.Topology import topology_get_uuid -#from context.client.EventsCollector import EventsCollector +from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, TOPOLOGY_NAME @pytest.mark.depends(on=['context/tests/test_context.py::test_context']) def test_topology(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = True, activate_device_collector = False, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() + events_collector = EventsCollector( + context_client, log_events_received=True, + activate_context_collector = False, activate_topology_collector = True, activate_device_collector = False, + activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + activate_connection_collector = False) + events_collector.start() + time.sleep(3) # wait for the events collector to start # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- context_client.SetContext(Context(**CONTEXT)) - # event = events_collector.get_event(block=True) - # assert isinstance(event, ContextEvent) - # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert event.context_id.context_uuid.uuid == context_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ContextEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE # ----- Get when the object does not exist ------------------------------------------------------------------------- topology_id = TopologyId(**TOPOLOGY_ID) @@ -65,11 +65,11 @@ def test_topology(context_client : ContextClient) -> None: assert response.topology_uuid.uuid == topology_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, TopologyEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.topology_id.context_id.context_uuid.uuid == context_uuid - #assert event.topology_id.topology_uuid.uuid == topology_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, TopologyEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.topology_id.context_id.context_uuid.uuid == context_uuid + assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -111,11 +111,11 @@ def test_topology(context_client : ContextClient) -> None: assert response.topology_uuid.uuid == topology_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, TopologyEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.topology_id.context_id.context_uuid.uuid == context_uuid - #assert event.topology_id.topology_uuid.uuid == topology_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, TopologyEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.topology_id.context_id.context_uuid.uuid == context_uuid + assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) @@ -143,11 +143,11 @@ def test_topology(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, TopologyEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.topology_id.context_id.context_uuid.uuid == context_uuid - #assert event.topology_id.topology_uuid.uuid == topology_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, TopologyEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.topology_id.context_id.context_uuid.uuid == context_uuid + assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -164,10 +164,10 @@ def test_topology(context_client : ContextClient) -> None: # ----- Clean dependencies used in the test and capture related events --------------------------------------------- context_client.RemoveContext(ContextId(**CONTEXT_ID)) - #event = events_collector.get_event(block=True) - #assert isinstance(event, ContextEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.context_id.context_uuid.uuid == context_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ContextEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + events_collector.stop() -- GitLab From ca0c74eb484640375be5f728318f446e022c9ba4 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 15:59:22 +0000 Subject: [PATCH 045/158] Context: - added missing requirement - corrected unitary test Topology --- src/context/requirements.in | 1 + src/context/tests/test_topology.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/context/requirements.in b/src/context/requirements.in index 83ae02faf..e4bb209c7 100644 --- a/src/context/requirements.in +++ b/src/context/requirements.in @@ -1,3 +1,4 @@ +nats-py==2.2.0 psycopg2-binary==2.9.3 SQLAlchemy==1.4.40 sqlalchemy-cockroachdb==1.4.3 diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 2e7e38cb1..c9fd68701 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -32,11 +32,13 @@ def test_topology(context_client : ContextClient) -> None: time.sleep(3) # wait for the events collector to start # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - context_client.SetContext(Context(**CONTEXT)) + response = context_client.SetContext(Context(**CONTEXT)) + context_uuid = response.context_uuid.uuid event = events_collector.get_event(block=True, timeout=1.0) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- topology_id = TopologyId(**TOPOLOGY_ID) -- GitLab From 7730ad87a46dbbc0220d8b9445a1de16df8de225 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 16:05:26 +0000 Subject: [PATCH 046/158] Context: - corrected CI/CD pipeline --- src/context/.gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 2a707004f..ba3b726dc 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -82,7 +82,7 @@ unit test context: --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - docker ps -a - - sleep 3 + - sleep 5 - docker logs $IMAGE_NAME - > docker exec -i $IMAGE_NAME bash -c -- GitLab From 14341492a2bc6065084366a2f34106e39fe29c52 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 16:09:31 +0000 Subject: [PATCH 047/158] Context: - corrected Topology unitary test --- src/context/tests/test_topology.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index c9fd68701..49ec01625 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -25,7 +25,7 @@ def test_topology(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector( context_client, log_events_received=True, - activate_context_collector = False, activate_topology_collector = True, activate_device_collector = False, + activate_context_collector = True, activate_topology_collector = True, activate_device_collector = False, activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, activate_connection_collector = False) events_collector.start() -- GitLab From b29a7438ea3a51e23bd67bd2ff4ad16d1f097ddb Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 16:31:03 +0000 Subject: [PATCH 048/158] Context: - corrected run_tests_locally script - solved formatting issue with Database Engine error logging - minor type hinting corrections - activated event notifications for Device, EndPoint and ConfigRule --- scripts/run_tests_locally-context.sh | 4 +- .../service/ContextServiceServicerImpl.py | 21 +++-- src/context/service/database/ConfigRule.py | 27 +++++- src/context/service/database/Device.py | 57 +++++++----- src/context/service/database/Engine.py | 4 +- src/context/service/database/Topology.py | 2 +- .../database/models/ConfigRuleModel.py | 4 +- .../service/database/models/DeviceModel.py | 12 ++- .../service/database/models/EndPointModel.py | 4 +- src/context/tests/test_device.py | 91 ++++++++----------- 10 files changed, 126 insertions(+), 100 deletions(-) diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index 0124469ec..8c0b300b7 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -60,9 +60,7 @@ export NATS_URI="nats://tfs:tfs123@172.254.254.11:4222" export PYTHONPATH=/home/tfs/tfs-ctrl/src # helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \ - context/tests/test_context.py \ - context/tests/test_topology.py - #context/tests/test_*.py + context/tests/test_*.py echo echo "Coverage report:" diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 1528d64d9..95cda2c29 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -130,28 +130,29 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListDeviceIds(self, request : Empty, context : grpc.ServicerContext) -> DeviceIdList: - return device_list_ids(self.db_engine) + return DeviceIdList(device_ids=device_list_ids(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListDevices(self, request : Empty, context : grpc.ServicerContext) -> DeviceList: - return device_list_objs(self.db_engine) + return DeviceList(devices=device_list_objs(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetDevice(self, request : ContextId, context : grpc.ServicerContext) -> Device: - return device_get(self.db_engine, request) + return Device(**device_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: - device_id,updated = device_set(self.db_engine, request) # pylint: disable=unused-variable - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id}) - return device_id + device_id,updated = device_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id}) + return DeviceId(**device_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: - deleted = device_delete(self.db_engine, request) # pylint: disable=unused-variable - #if deleted: - # notify_event(self.messagebroker, TOPIC_DEVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'device_id': request}) + device_id,deleted = device_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id}) return Empty() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py index 05dda20aa..f64e273bf 100644 --- a/src/context/service/database/ConfigRule.py +++ b/src/context/service/database/ConfigRule.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime, logging from sqlalchemy import delete from sqlalchemy.dialects.postgresql import insert from sqlalchemy.orm import Session @@ -22,8 +23,10 @@ from .models.enums.ConfigAction import grpc_to_enum__config_action from .models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel from .uuids._Builder import get_uuid_random +LOGGER = logging.getLogger(__name__) + def compose_config_rules_data( - config_rules : List[ConfigRule], + config_rules : List[ConfigRule], now : datetime.datetime, device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None ) -> List[Dict]: dict_config_rules : List[Dict] = list() @@ -36,6 +39,8 @@ def compose_config_rules_data( 'kind' : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member 'action' : grpc_to_enum__config_action(config_rule.action), 'data' : grpc_message_to_json_string(getattr(config_rule, str_kind, {})), + 'created_at' : now, + 'updated_at' : now, } if device_uuid is not None: dict_config_rule['device_uuid' ] = device_uuid if service_uuid is not None: dict_config_rule['service_uuid'] = service_uuid @@ -45,16 +50,30 @@ def compose_config_rules_data( def upsert_config_rules( session : Session, config_rules : List[Dict], - device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None -) -> None: + device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None, +) -> bool: + # TODO: do not delete all rules; just add-remove as needed stmt = delete(ConfigRuleModel) if device_uuid is not None: stmt = stmt.where(ConfigRuleModel.device_uuid == device_uuid ) if service_uuid is not None: stmt = stmt.where(ConfigRuleModel.service_uuid == service_uuid) if slice_uuid is not None: stmt = stmt.where(ConfigRuleModel.slice_uuid == slice_uuid ) session.execute(stmt) + + updated = False if len(config_rules) > 0: - session.execute(insert(ConfigRuleModel).values(config_rules)) + stmt = insert(ConfigRuleModel).values(config_rules) + #stmt = stmt.on_conflict_do_update( + # index_elements=[ConfigRuleModel.configrule_uuid], + # set_=dict( + # updated_at = stmt.excluded.updated_at, + # ) + #) + stmt = stmt.returning(ConfigRuleModel.created_at, ConfigRuleModel.updated_at) + config_rule_updates = session.execute(stmt).fetchall() + LOGGER.warning('config_rule_updates = {:s}'.format(str(config_rule_updates))) + # TODO: updated = ... + return updated #Union_SpecificConfigRule = Union[ # ConfigRuleCustomModel, ConfigRuleAclModel diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index ccd991d7f..68369ac9d 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime, logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple -from common.proto.context_pb2 import Device, DeviceId, DeviceIdList, DeviceList from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.proto.context_pb2 import Device, DeviceId from common.tools.object_factory.Device import json_device_id -from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules from .models.DeviceModel import DeviceModel from .models.EndPointModel import EndPointModel from .models.TopologyModel import TopologyDeviceModel @@ -29,22 +29,23 @@ from .models.enums.DeviceOperationalStatus import grpc_to_enum__device_operation from .models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type from .uuids.Device import device_get_uuid from .uuids.EndPoint import endpoint_get_uuid +from .ConfigRule import compose_config_rules_data, upsert_config_rules -def device_list_ids(db_engine : Engine) -> DeviceIdList: +LOGGER = logging.getLogger(__name__) + +def device_list_ids(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[DeviceModel] = session.query(DeviceModel).all() - #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return DeviceIdList(device_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def device_list_objs(db_engine : Engine) -> DeviceList: +def device_list_objs(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[DeviceModel] = session.query(DeviceModel).all() - #.options(selectinload(DeviceModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return DeviceList(devices=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def device_get(db_engine : Engine, request : DeviceId) -> Device: +def device_get(db_engine : Engine, request : DeviceId) -> Dict: device_uuid = device_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[DeviceModel] = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none() @@ -55,9 +56,9 @@ def device_get(db_engine : Engine, request : DeviceId) -> Device: raise NotFoundException('Device', raw_device_uuid, extra_details=[ 'device_uuid generated was: {:s}'.format(device_uuid) ]) - return Device(**obj) + return obj -def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: +def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]: raw_device_uuid = request.device_id.device_uuid.uuid raw_device_name = request.name device_name = raw_device_uuid if len(raw_device_name) == 0 else raw_device_name @@ -67,6 +68,8 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: oper_status = grpc_to_enum__device_operational_status(request.device_operational_status) device_drivers = [grpc_to_enum__device_driver(d) for d in request.device_drivers] + now = datetime.datetime.utcnow() + topology_uuids : Set[str] = set() related_topologies : List[Dict] = list() endpoints_data : List[Dict] = list() @@ -94,6 +97,8 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: 'name' : endpoint_name, 'endpoint_type' : endpoint.endpoint_type, 'kpi_sample_types': kpi_sample_types, + 'created_at' : now, + 'updated_at' : now, }) if endpoint_topology_uuid not in topology_uuids: @@ -103,7 +108,7 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: }) topology_uuids.add(endpoint_topology_uuid) - config_rules = compose_config_rules_data(request.device_config.config_rules, device_uuid=device_uuid) + config_rules = compose_config_rules_data(request.device_config.config_rules, now, device_uuid=device_uuid) device_data = [{ 'device_uuid' : device_uuid, @@ -111,9 +116,11 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: 'device_type' : device_type, 'device_operational_status': oper_status, 'device_drivers' : device_drivers, + 'created_at' : now, + 'updated_at' : now, }] - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(DeviceModel).values(device_data) stmt = stmt.on_conflict_do_update( index_elements=[DeviceModel.device_uuid], @@ -122,9 +129,12 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: device_type = stmt.excluded.device_type, device_operational_status = stmt.excluded.device_operational_status, device_drivers = stmt.excluded.device_drivers, + updated_at = stmt.excluded.updated_at, ) ) - session.execute(stmt) + stmt = stmt.returning(DeviceModel.created_at, DeviceModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + updated = updated_at > created_at stmt = insert(EndPointModel).values(endpoints_data) stmt = stmt.on_conflict_do_update( @@ -133,23 +143,28 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[DeviceId, bool]: name = stmt.excluded.name, endpoint_type = stmt.excluded.endpoint_type, kpi_sample_types = stmt.excluded.kpi_sample_types, + updated_at = stmt.excluded.updated_at, ) ) - session.execute(stmt) + stmt = stmt.returning(EndPointModel.created_at, EndPointModel.updated_at) + endpoint_updates = session.execute(stmt).fetchall() + LOGGER.warning('endpoint_updates = {:s}'.format(str(endpoint_updates))) session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing( index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] )) - upsert_config_rules(session, config_rules, device_uuid=device_uuid) + configrules_updated = upsert_config_rules(session, config_rules, device_uuid=device_uuid) + + return updated - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated - return DeviceId(**json_device_id(device_uuid)),updated + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return json_device_id(device_uuid),updated -def device_delete(db_engine : Engine, request : DeviceId) -> bool: +def device_delete(db_engine : Engine, request : DeviceId) -> Tuple[Dict, bool]: device_uuid = device_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return json_device_id(device_uuid),deleted diff --git a/src/context/service/database/Engine.py b/src/context/service/database/Engine.py index a1aedc3ae..c507efc72 100644 --- a/src/context/service/database/Engine.py +++ b/src/context/service/database/Engine.py @@ -29,13 +29,13 @@ class Engine: engine = sqlalchemy.create_engine( crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True) except: # pylint: disable=bare-except # pragma: no cover - LOGGER.exception('Failed to connect to database: {:s}'.format(crdb_uri)) + LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) return None try: Engine.create_database(engine) except: # pylint: disable=bare-except # pragma: no cover - LOGGER.exception('Failed to check/create to database: {:s}'.format(engine.url)) + LOGGER.exception('Failed to check/create to database: {:s}'.format(str(crdb_uri))) return None return engine diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py index 75fc229d8..fcd93e6bb 100644 --- a/src/context/service/database/Topology.py +++ b/src/context/service/database/Topology.py @@ -82,7 +82,7 @@ def topology_set(db_engine : Engine, request : Topology) -> Tuple[Dict, bool]: 'updated_at' : now, }] - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(TopologyModel).values(topology_data) stmt = stmt.on_conflict_do_update( index_elements=[TopologyModel.topology_uuid], diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index c2baa8df6..a697de556 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -13,7 +13,7 @@ # limitations under the License. import enum, json -from sqlalchemy import CheckConstraint, Column, Enum, ForeignKey, Integer, String +from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String from sqlalchemy.dialects.postgresql import UUID from typing import Dict from .enums.ConfigAction import ORM_ConfigActionEnum @@ -35,6 +35,8 @@ class ConfigRuleModel(_Base): kind = Column(Enum(ConfigRuleKindEnum), nullable=False) action = Column(Enum(ORM_ConfigActionEnum), nullable=False) data = Column(String, nullable=False) + created_at = Column(DateTime) + updated_at = Column(DateTime) __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py index 2deb688e1..ef56c7158 100644 --- a/src/context/service/database/models/DeviceModel.py +++ b/src/context/service/database/models/DeviceModel.py @@ -13,7 +13,7 @@ # limitations under the License. import operator -from sqlalchemy import Column, Enum, String +from sqlalchemy import Column, DateTime, Enum, String from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship from typing import Dict @@ -24,11 +24,13 @@ from ._Base import _Base class DeviceModel(_Base): __tablename__ = 'device' - device_uuid = Column(UUID(as_uuid=False), primary_key=True) - device_name = Column(String, nullable=False) - device_type = Column(String, nullable=False) + device_uuid = Column(UUID(as_uuid=False), primary_key=True) + device_name = Column(String, nullable=False) + device_type = Column(String, nullable=False) device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum), nullable=False) - device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) + device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) + created_at = Column(DateTime) + updated_at = Column(DateTime) #topology_devices = relationship('TopologyDeviceModel', back_populates='device') config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device' diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py index 4151cfe0d..abc16c1af 100644 --- a/src/context/service/database/models/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from sqlalchemy import Column, Enum, ForeignKey, String +from sqlalchemy import Column, DateTime, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.orm import relationship from typing import Dict @@ -28,6 +28,8 @@ class EndPointModel(_Base): name = Column(String, nullable=False) endpoint_type = Column(String, nullable=False) kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1)) + created_at = Column(DateTime) + updated_at = Column(DateTime) device = relationship('DeviceModel', back_populates='endpoints') topology = relationship('TopologyModel') diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py index e53ad747c..b009a5e45 100644 --- a/src/context/tests/test_device.py +++ b/src/context/tests/test_device.py @@ -12,24 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest +import copy, grpc, pytest, time from common.proto.context_pb2 import ( - Context, ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty, Topology, TopologyId) + Context, ContextEvent, ContextId, Device, DeviceDriverEnum, DeviceEvent, DeviceId, DeviceOperationalStatusEnum, Empty, EventTypeEnum, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient from context.service.database.uuids.Device import device_get_uuid -#from context.client.EventsCollector import EventsCollector +from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_NAME, TOPOLOGY, TOPOLOGY_ID @pytest.mark.depends(on=['context/tests/test_topology.py::test_topology']) def test_device(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = True, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() + events_collector = EventsCollector( + context_client, log_events_received=True, + activate_context_collector = False, activate_topology_collector = False, activate_device_collector = True, + activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + activate_connection_collector = False) + events_collector.start() + time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -38,14 +39,14 @@ def test_device(context_client : ContextClient) -> None: response = context_client.SetTopology(Topology(**TOPOLOGY)) topology_uuid = response.topology_uuid.uuid - #events = events_collector.get_events(block=True, count=2) - #assert isinstance(events[0], ContextEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[0].context_id.context_uuid.uuid == context_uuid - #assert isinstance(events[1], TopologyEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[1].topology_id.topology_uuid.uuid == topology_uuid + events = events_collector.get_events(block=True, count=2, timeout=1.0) + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == context_uuid + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[1].topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- device_id = DeviceId(**DEVICE_R1_ID) @@ -78,10 +79,10 @@ def test_device(context_client : ContextClient) -> None: assert response.device_uuid.uuid == device_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, DeviceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.device_id.device_uuid.uuid == device_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, DeviceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.device_id.device_uuid.uuid == device_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID)) @@ -121,10 +122,10 @@ def test_device(context_client : ContextClient) -> None: assert response.device_uuid.uuid == device_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, DeviceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.device_id.device_uuid.uuid == device_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, DeviceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.device_id.device_uuid.uuid == device_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID)) @@ -155,20 +156,6 @@ def test_device(context_client : ContextClient) -> None: assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers assert len(response.devices[0].device_endpoints) == 4 - # ----- Create object relation ------------------------------------------------------------------------------------- - #TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY) - #TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID) - #response = context_client.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE)) - #assert response.context_id.context_uuid.uuid == context_uuid - #assert response.topology_uuid.uuid == topology_uuid - - # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, TopologyEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert response.context_id.context_uuid.uuid == context_uuid - #assert response.topology_uuid.uuid == topology_uuid - # ----- Check relation was created --------------------------------------------------------------------------------- response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) assert response.topology_id.context_id.context_uuid.uuid == context_uuid @@ -181,10 +168,10 @@ def test_device(context_client : ContextClient) -> None: context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, DeviceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.device_id.device_uuid.uuid == device_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, DeviceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.device_id.device_uuid.uuid == device_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListDeviceIds(Empty()) @@ -203,14 +190,14 @@ def test_device(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - #events = events_collector.get_events(block=True, count=2) - #assert isinstance(events[0], TopologyEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[0].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[0].topology_id.topology_uuid.uuid == topology_uuid - #assert isinstance(events[1], ContextEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[1].context_id.context_uuid.uuid == context_uuid + events = events_collector.get_events(block=True, count=2, timeout=1.0) + assert isinstance(events[0], TopologyEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[0].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[0].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[1], ContextEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + events_collector.stop() -- GitLab From 5f50df51bf2dd31aa3c50ccc8c79ba9adf05e3e0 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 16:52:56 +0000 Subject: [PATCH 049/158] Context: - cosmetic changes - activated correct events to collect for device unitary test --- src/context/tests/test_device.py | 5 +++-- src/context/tests/test_topology.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py index b009a5e45..4080cfcac 100644 --- a/src/context/tests/test_device.py +++ b/src/context/tests/test_device.py @@ -14,7 +14,8 @@ import copy, grpc, pytest, time from common.proto.context_pb2 import ( - Context, ContextEvent, ContextId, Device, DeviceDriverEnum, DeviceEvent, DeviceId, DeviceOperationalStatusEnum, Empty, EventTypeEnum, Topology, TopologyEvent, TopologyId) + Context, ContextEvent, ContextId, Device, DeviceDriverEnum, DeviceEvent, DeviceId, DeviceOperationalStatusEnum, + Empty, EventTypeEnum, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient from context.service.database.uuids.Device import device_get_uuid from context.client.EventsCollector import EventsCollector @@ -26,7 +27,7 @@ def test_device(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsCollector( context_client, log_events_received=True, - activate_context_collector = False, activate_topology_collector = False, activate_device_collector = True, + activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, activate_connection_collector = False) events_collector.start() diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 49ec01625..311e0f874 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -13,7 +13,8 @@ # limitations under the License. import copy, grpc, pytest, time -from common.proto.context_pb2 import Context, ContextEvent, ContextId, EventTypeEnum, Topology, TopologyEvent, TopologyId +from common.proto.context_pb2 import ( + Context, ContextEvent, ContextId, EventTypeEnum, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient from context.service.database.uuids.Topology import topology_get_uuid from context.client.EventsCollector import EventsCollector -- GitLab From 3cdbe036829e1f4142e81b285387bfc4c5659b44 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 17:10:54 +0000 Subject: [PATCH 050/158] Context: - corrected report of config rules updated - corrected update notifications for Device - removed unneeded log messages - migrated events for Link entity --- .../service/ContextServiceServicerImpl.py | 21 ++-- src/context/service/database/ConfigRule.py | 8 +- src/context/service/database/Device.py | 5 +- src/context/service/database/Link.py | 55 ++++---- .../service/database/models/LinkModel.py | 4 +- src/context/tests/test_link.py | 117 ++++++++---------- 6 files changed, 107 insertions(+), 103 deletions(-) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 95cda2c29..5c9565859 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -165,28 +165,29 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList: - return link_list_ids(self.db_engine) + return LinkIdList(link_ids=link_list_ids(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: - return link_list_objs(self.db_engine) + return LinkList(links=link_list_objs(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: - return link_get(self.db_engine, request) + return Link(**link_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: - link_id,updated = link_set(self.db_engine, request) # pylint: disable=unused-variable - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) - return link_id + link_id,updated = link_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) + return LinkId(**link_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: - deleted = link_delete(self.db_engine, request) # pylint: disable=unused-variable - #if deleted: - # notify_event(self.messagebroker, TOPIC_LINK, EventTypeEnum.EVENTTYPE_REMOVE, {'link_id': request}) + link_id,deleted = link_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id}) return Empty() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py index f64e273bf..5f701386f 100644 --- a/src/context/service/database/ConfigRule.py +++ b/src/context/service/database/ConfigRule.py @@ -59,7 +59,7 @@ def upsert_config_rules( if slice_uuid is not None: stmt = stmt.where(ConfigRuleModel.slice_uuid == slice_uuid ) session.execute(stmt) - updated = False + configrule_updates = [] if len(config_rules) > 0: stmt = insert(ConfigRuleModel).values(config_rules) #stmt = stmt.on_conflict_do_update( @@ -69,11 +69,9 @@ def upsert_config_rules( # ) #) stmt = stmt.returning(ConfigRuleModel.created_at, ConfigRuleModel.updated_at) - config_rule_updates = session.execute(stmt).fetchall() - LOGGER.warning('config_rule_updates = {:s}'.format(str(config_rule_updates))) - # TODO: updated = ... + configrule_updates = session.execute(stmt).fetchall() - return updated + return configrule_updates #Union_SpecificConfigRule = Union[ # ConfigRuleCustomModel, ConfigRuleAclModel diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index 68369ac9d..e40c28e69 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -148,13 +148,14 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]: ) stmt = stmt.returning(EndPointModel.created_at, EndPointModel.updated_at) endpoint_updates = session.execute(stmt).fetchall() - LOGGER.warning('endpoint_updates = {:s}'.format(str(endpoint_updates))) + updated = updated or any([(updated_at > created_at) for created_at,updated_at in endpoint_updates]) session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing( index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] )) - configrules_updated = upsert_config_rules(session, config_rules, device_uuid=device_uuid) + configrule_updates = upsert_config_rules(session, config_rules, device_uuid=device_uuid) + updated = updated or any([(updated_at > created_at) for created_at,updated_at in configrule_updates]) return updated diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index c21dd6714..2621e73dc 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime, logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple -from common.proto.context_pb2 import Link, LinkId, LinkIdList, LinkList +from common.proto.context_pb2 import Link, LinkId from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.object_factory.Link import json_link_id from .models.LinkModel import LinkModel, LinkEndPointModel @@ -25,21 +26,21 @@ from .models.TopologyModel import TopologyLinkModel from .uuids.EndPoint import endpoint_get_uuid from .uuids.Link import link_get_uuid -def link_list_ids(db_engine : Engine) -> LinkIdList: +LOGGER = logging.getLogger(__name__) + +def link_list_ids(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[LinkModel] = session.query(LinkModel).all() - #.options(selectinload(LinkModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return LinkIdList(link_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def link_list_objs(db_engine : Engine) -> LinkList: +def link_list_objs(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[LinkModel] = session.query(LinkModel).all() - #.options(selectinload(LinkModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return LinkList(links=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def link_get(db_engine : Engine, request : LinkId) -> Link: +def link_get(db_engine : Engine, request : LinkId) -> Dict: link_uuid = link_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[LinkModel] = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none() @@ -50,14 +51,16 @@ def link_get(db_engine : Engine, request : LinkId) -> Link: raise NotFoundException('Link', raw_link_uuid, extra_details=[ 'link_uuid generated was: {:s}'.format(link_uuid) ]) - return Link(**obj) + return obj -def link_set(db_engine : Engine, request : Link) -> Tuple[LinkId, bool]: +def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]: raw_link_uuid = request.link_id.link_uuid.uuid raw_link_name = request.name link_name = raw_link_uuid if len(raw_link_name) == 0 else raw_link_name link_uuid = link_get_uuid(request.link_id, link_name=link_name, allow_random=True) + now = datetime.datetime.utcnow() + topology_uuids : Set[str] = set() related_topologies : List[Dict] = list() link_endpoints_data : List[Dict] = list() @@ -73,23 +76,31 @@ def link_set(db_engine : Engine, request : Link) -> Tuple[LinkId, bool]: if endpoint_topology_uuid not in topology_uuids: related_topologies.append({ 'topology_uuid': endpoint_topology_uuid, - 'link_uuid': link_uuid, + 'link_uuid' : link_uuid, }) topology_uuids.add(endpoint_topology_uuid) link_data = [{ - 'link_uuid': link_uuid, - 'link_name': link_name, + 'link_uuid' : link_uuid, + 'link_name' : link_name, + 'created_at': now, + 'updated_at': now, }] - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(LinkModel).values(link_data) stmt = stmt.on_conflict_do_update( index_elements=[LinkModel.link_uuid], - set_=dict(link_name = stmt.excluded.link_name) + set_=dict( + link_name = stmt.excluded.link_name, + updated_at = stmt.excluded.updated_at, + ) ) - session.execute(stmt) + stmt = stmt.returning(LinkModel.created_at, LinkModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + updated = updated_at > created_at + # TODO: manage add/remove of endpoints; manage changes in relations with topology stmt = insert(LinkEndPointModel).values(link_endpoints_data) stmt = stmt.on_conflict_do_nothing( index_elements=[LinkEndPointModel.link_uuid, LinkEndPointModel.endpoint_uuid] @@ -100,13 +111,15 @@ def link_set(db_engine : Engine, request : Link) -> Tuple[LinkId, bool]: index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid] )) - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated - return LinkId(**json_link_id(link_uuid)),updated + return updated -def link_delete(db_engine : Engine, request : LinkId) -> bool: + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return json_link_id(link_uuid),updated + +def link_delete(db_engine : Engine, request : LinkId) -> Tuple[Dict, bool]: link_uuid = link_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(LinkModel).filter_by(link_uuid=link_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return json_link_id(link_uuid),deleted diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index ecad01972..a13f61bf3 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from sqlalchemy import Column, ForeignKey, String +from sqlalchemy import Column, DateTime, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict @@ -23,6 +23,8 @@ class LinkModel(_Base): link_uuid = Column(UUID(as_uuid=False), primary_key=True) link_name = Column(String, nullable=False) + created_at = Column(DateTime) + updated_at = Column(DateTime) #topology_links = relationship('TopologyLinkModel', back_populates='link') link_endpoints = relationship('LinkEndPointModel') # lazy='joined', back_populates='link' diff --git a/src/context/tests/test_link.py b/src/context/tests/test_link.py index ec767f1c9..5167c41b8 100644 --- a/src/context/tests/test_link.py +++ b/src/context/tests/test_link.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest -from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId +import copy, grpc, pytest, time +from common.proto.context_pb2 import ( + Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, Empty, EventTypeEnum, Link, LinkEvent, LinkId, + Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient -#from context.client.EventsCollector import EventsCollector +from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Link import link_get_uuid from .Objects import ( CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_NAME, @@ -25,12 +27,13 @@ from .Objects import ( def test_link(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, - # activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() + events_collector = EventsCollector( + context_client, log_events_received=True, + activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False, + activate_connection_collector = False) + events_collector.start() + time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -45,20 +48,20 @@ def test_link(context_client : ContextClient) -> None: response = context_client.SetDevice(Device(**DEVICE_R2)) device_r2_uuid = response.device_uuid.uuid - # events = events_collector.get_events(block=True, count=4) - # assert isinstance(events[0], ContextEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == context_uuid - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - # assert events[1].topology_id.topology_uuid.uuid == topology_uuid - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == device_r1_uuid - # assert isinstance(events[3], DeviceEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == device_r2_uuid + events = events_collector.get_events(block=True, count=4, timeout=1.0) + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == context_uuid + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[1].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[2].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[3], DeviceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[3].device_id.device_uuid.uuid == device_r2_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- link_id = LinkId(**LINK_R1_R2_ID) @@ -81,10 +84,10 @@ def test_link(context_client : ContextClient) -> None: assert response.link_uuid.uuid == link_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, LinkEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.link_id.link_uuid.uuid == link_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, LinkEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.link_id.link_uuid.uuid == link_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetLink(LinkId(**LINK_R1_R2_ID)) @@ -111,10 +114,10 @@ def test_link(context_client : ContextClient) -> None: assert response.link_uuid.uuid == link_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, LinkEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.link_id.link_uuid.uuid == link_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, LinkEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.link_id.link_uuid.uuid == link_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetLink(LinkId(**LINK_R1_R2_ID)) @@ -133,20 +136,6 @@ def test_link(context_client : ContextClient) -> None: assert response.links[0].name == new_link_name assert len(response.links[0].link_endpoint_ids) == 2 - # ----- Create object relation ------------------------------------------------------------------------------------- - #TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY) - #TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID) - #response = context_client.SetTopology(Topology(**TOPOLOGY_WITH_LINK)) - #assert response.context_id.context_uuid.uuid == context_uuid - #assert response.topology_uuid.uuid == topology_uuid - - # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, TopologyEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert response.context_id.context_uuid.uuid == context_uuid - #assert response.topology_uuid.uuid == topology_uuid - # ----- Check relation was created --------------------------------------------------------------------------------- response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) assert response.topology_id.context_id.context_uuid.uuid == context_uuid @@ -161,10 +150,10 @@ def test_link(context_client : ContextClient) -> None: context_client.RemoveLink(LinkId(**LINK_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, LinkEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.link_id.link_uuid.uuid == link_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, LinkEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.link_id.link_uuid.uuid == link_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListLinkIds(Empty()) @@ -187,20 +176,20 @@ def test_link(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - #events = events_collector.get_events(block=True, count=4) - #assert isinstance(events[0], DeviceEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[0].device_id.device_uuid.uuid == device_r1_uuid - #assert isinstance(events[1], DeviceEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[1].device_id.device_uuid.uuid == device_r2_uuid - #assert isinstance(events[2], TopologyEvent) - #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[2].topology_id.topology_uuid.uuid == topology_uuid - #assert isinstance(events[3], ContextEvent) - #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[3].context_id.context_uuid.uuid == context_uuid + events = events_collector.get_events(block=True, count=4, timeout=1.0) + assert isinstance(events[0], DeviceEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[0].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[1], DeviceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].device_id.device_uuid.uuid == device_r2_uuid + assert isinstance(events[2], TopologyEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[2].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[3], ContextEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + events_collector.stop() -- GitLab From e6cfad2f9b9c4119194333234148bc52d62839ce Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 17:25:32 +0000 Subject: [PATCH 051/158] Context: - removed unneeded files - added control when no database engine can be instantiated - added missing assertions in device and link unitary tests --- src/context/service/ChangeFeedClient.py | 87 ----------------------- src/context/service/ChangeFeedExample.txt | 33 --------- src/context/service/__main__.py | 2 + src/context/tests/test_device.py | 1 + src/context/tests/test_link.py | 1 + 5 files changed, 4 insertions(+), 120 deletions(-) delete mode 100644 src/context/service/ChangeFeedClient.py delete mode 100644 src/context/service/ChangeFeedExample.txt diff --git a/src/context/service/ChangeFeedClient.py b/src/context/service/ChangeFeedClient.py deleted file mode 100644 index 8285dc6c3..000000000 --- a/src/context/service/ChangeFeedClient.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# pip install psycopg==3.1.6 -# Ref: https://www.cockroachlabs.com/docs/stable/changefeed-for.html -# (current implementation) Ref: https://www.cockroachlabs.com/docs/v22.1/changefeed-for -# Ref: https://www.psycopg.org/psycopg3/docs/api/crdb.html - -import contextlib, json, logging, psycopg, psycopg.conninfo, psycopg.crdb, sys, time -from typing import Any, Dict, Iterator, List, Optional, Tuple -from common.Settings import get_setting - -LOGGER = logging.getLogger(__name__) - -SQL_ACTIVATE_CHANGE_FEED = 'SET CLUSTER SETTING kv.rangefeed.enabled = true' -SQL_START_CHANGE_FEED = 'EXPERIMENTAL CHANGEFEED FOR {:s}.{:s} WITH format=json, no_initial_scan, updated' - -class ChangeFeedClient: - def __init__(self) -> None: - self._connection : Optional[psycopg.crdb.CrdbConnection] = None - self._conn_info_dict : Dict = dict() - self._is_crdb : bool = False - - def initialize(self) -> bool: - crdb_uri = get_setting('CRDB_URI') - if crdb_uri is None: - LOGGER.error('Connection string not found in EnvVar CRDB_URI') - return False - - try: - crdb_uri = crdb_uri.replace('cockroachdb://', 'postgres://') - self._conn_info_dict = psycopg.conninfo.conninfo_to_dict(crdb_uri) - except psycopg.ProgrammingError: - LOGGER.exception('Invalid connection string: {:s}'.format(str(crdb_uri))) - return False - - self._connection = psycopg.crdb.connect(**self._conn_info_dict) - self._is_crdb = psycopg.crdb.CrdbConnection.is_crdb(self._connection) - LOGGER.debug('is_crdb = {:s}'.format(str(self._is_crdb))) - - # disable multi-statement transactions - self._connection.autocommit = True - - # activate change feeds - self._connection.execute(SQL_ACTIVATE_CHANGE_FEED) - - return self._is_crdb - - def get_changes(self, table_name : str) -> Iterator[Tuple[float, str, List[Any], bool, Dict]]: - db_name = self._conn_info_dict.get('dbname') - if db_name is None: raise Exception('ChangeFeed has not been initialized!') - cur = self._connection.cursor() - str_sql_query = SQL_START_CHANGE_FEED.format(db_name, table_name) - with contextlib.closing(cur.stream(str_sql_query)) as feed: - for change in feed: - LOGGER.info(change) - table_name, primary_key, data = change[0], json.loads(change[1]), json.loads(change[2]) - timestamp = data.get('updated') / 1.e9 - if timestamp is None: timestamp = time.time() - after = data.get('after') - is_delete = ('after' in data) and (after is None) - yield timestamp, table_name, primary_key, is_delete, after - -def main(): - logging.basicConfig(level=logging.INFO) - - cf = ChangeFeed() - ready = cf.initialize() - if not ready: raise Exception('Unable to initialize ChangeFeed') - for change in cf.get_changes('context'): - LOGGER.info(change) - - return 0 - -if __name__ == '__main__': - sys.exit(main()) diff --git a/src/context/service/ChangeFeedExample.txt b/src/context/service/ChangeFeedExample.txt deleted file mode 100644 index 679a7c716..000000000 --- a/src/context/service/ChangeFeedExample.txt +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - @safe_and_metered_rpc_method(METRICS, LOGGER) - def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: - pass - #for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT): - # yield ContextEvent(**json.loads(message.content)) - #cf = ChangeFeedClient() - #ready = cf.initialize() - #if not ready: raise OperationFailedException('Initialize ChangeFeed') - #for timestamp, _, primary_key, is_delete, after in cf.get_changes('context'): - # if is_delete: - # event_type = EventTypeEnum.EVENTTYPE_REMOVE - # else: - # is_create = (timestamp - after.get('created_at')) < 1.0 - # event_type = EventTypeEnum.EVENTTYPE_CREATE if is_create else EventTypeEnum.EVENTTYPE_UPDATE - # event = { - # 'event': {'timestamp': {'timestamp': timestamp}, 'event_type': event_type}, - # 'context_id': json_context_id(primary_key[0]), - # } - # yield ContextEvent(**event) diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index 145c91cf0..f15c8fde0 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -43,7 +43,9 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) + # Get Database Engine instance and initialize database, if needed db_engine = Engine.get_engine() + if db_engine is None: return -1 Engine.create_database(db_engine) rebuild_database(db_engine) diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py index 4080cfcac..6e2fdd52d 100644 --- a/src/context/tests/test_device.py +++ b/src/context/tests/test_device.py @@ -38,6 +38,7 @@ def test_device(context_client : ContextClient) -> None: context_uuid = response.context_uuid.uuid response = context_client.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == context_uuid topology_uuid = response.topology_uuid.uuid events = events_collector.get_events(block=True, count=2, timeout=1.0) diff --git a/src/context/tests/test_link.py b/src/context/tests/test_link.py index 5167c41b8..59fed4870 100644 --- a/src/context/tests/test_link.py +++ b/src/context/tests/test_link.py @@ -40,6 +40,7 @@ def test_link(context_client : ContextClient) -> None: context_uuid = response.context_uuid.uuid response = context_client.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == context_uuid topology_uuid = response.topology_uuid.uuid response = context_client.SetDevice(Device(**DEVICE_R1)) -- GitLab From b5a26ccfb8fd61f9ea7d192c331a013b992c2782 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 17:30:53 +0000 Subject: [PATCH 052/158] Context: - added logs to CI/CD pipeline for debug purposes --- src/context/.gitlab-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index ba3b726dc..80e12544c 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -71,8 +71,12 @@ unit test context: - echo "Waiting for initialization..." - sleep 10 - docker ps -a + - docker logs crdb + - docker logs nats - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $CRDB_ADDRESS - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $NATS_ADDRESS - > docker run --name $IMAGE_NAME -d -p 1010:1010 --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" -- GitLab From b9db50d5deffdeaf1b4dc579c9ca7585e45e0377 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 17:44:42 +0000 Subject: [PATCH 053/158] Context: - testing CI/CD pipeline --- src/context/.gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 80e12544c..61c59cb44 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -69,7 +69,7 @@ unit test context: docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 nats:2.9 --http_port 8222 --user tfs --pass tfs123 - echo "Waiting for initialization..." - - sleep 10 + - sleep 15 - docker ps -a - docker logs crdb - docker logs nats -- GitLab From a3f7e8f7466bb98300765a3931fb364a0e8401aa Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 18:01:18 +0000 Subject: [PATCH 054/158] Context: - migrated events for Service and Slice entities - added missing not-nulls to database fields --- .../service/ContextServiceServicerImpl.py | 51 ++-- src/context/service/database/ConfigRule.py | 2 +- src/context/service/database/Connection.py | 2 +- src/context/service/database/Constraint.py | 25 +- src/context/service/database/Service.py | 55 +++-- src/context/service/database/Slice.py | 60 +++-- .../database/models/ConfigRuleModel.py | 4 +- .../database/models/ConstraintModel.py | 4 +- .../service/database/models/ContextModel.py | 4 +- .../service/database/models/DeviceModel.py | 4 +- .../service/database/models/EndPointModel.py | 4 +- .../service/database/models/LinkModel.py | 4 +- .../service/database/models/ServiceModel.py | 4 +- .../service/database/models/SliceModel.py | 4 +- .../service/database/models/TopologyModel.py | 4 +- src/context/tests/test_service.py | 126 +++++----- src/context/tests/test_slice.py | 227 ++++++++++-------- 17 files changed, 336 insertions(+), 248 deletions(-) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 5c9565859..34608d619 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -200,28 +200,29 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList: - return service_list_ids(self.db_engine, request) + return ServiceIdList(service_ids=service_list_ids(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList: - return service_list_objs(self.db_engine, request) + return ServiceList(services=service_list_objs(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service: - return service_get(self.db_engine, request) + return Service(**service_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: - service_id,updated = service_set(self.db_engine, request) # pylint: disable=unused-variable - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) - return service_id + service_id,updated = service_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) + return ServiceId(**service_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: - deleted = service_delete(self.db_engine, request) # pylint: disable=unused-variable - #if deleted: - # notify_event(self.messagebroker, TOPIC_SERVICE, EventTypeEnum.EVENTTYPE_REMOVE, {'service_id': request}) + service_id,deleted = service_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id}) return Empty() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) @@ -234,35 +235,37 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList: - return slice_list_ids(self.db_engine, request) + return SliceIdList(slice_ids=slice_list_ids(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList: - return slice_list_objs(self.db_engine, request) + return SliceList(slices=slice_list_objs(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice: - return slice_get(self.db_engine, request) + return Slice(**slice_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: - slice_id,updated = slice_set(self.db_engine, request) # pylint: disable=unused-variable - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id}) - return slice_id + slice_id,updated = slice_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id}) + return SliceId(**slice_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: - slice_id,updated = slice_unset(self.db_engine, request) # pylint: disable=unused-variable - #if updated: - # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_UPDATE, {'slice_id': slice_id}) - return slice_id + slice_id,updated = slice_unset(self.db_engine, request) + if updated: + event_type = EventTypeEnum.EVENTTYPE_UPDATE + notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id}) + return SliceId(**slice_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: - deleted = slice_delete(self.db_engine, request) # pylint: disable=unused-variable - #if deleted: - # notify_event(self.messagebroker, TOPIC_SLICE, EventTypeEnum.EVENTTYPE_REMOVE, {'slice_id': request}) + slice_id,deleted = slice_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id}) return Empty() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py index 5f701386f..5443e178c 100644 --- a/src/context/service/database/ConfigRule.py +++ b/src/context/service/database/ConfigRule.py @@ -51,7 +51,7 @@ def compose_config_rules_data( def upsert_config_rules( session : Session, config_rules : List[Dict], device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None, -) -> bool: +) -> List[bool]: # TODO: do not delete all rules; just add-remove as needed stmt = delete(ConfigRuleModel) if device_uuid is not None: stmt = stmt.where(ConfigRuleModel.device_uuid == device_uuid ) diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py index 42fc86ebf..2f6fb8433 100644 --- a/src/context/service/database/Connection.py +++ b/src/context/service/database/Connection.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import re +import datetime, logging, re from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.exc import IntegrityError diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py index f79159a35..2880c05a8 100644 --- a/src/context/service/database/Constraint.py +++ b/src/context/service/database/Constraint.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime, logging from sqlalchemy import delete from sqlalchemy.dialects.postgresql import insert from sqlalchemy.orm import Session @@ -21,8 +22,10 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from .models.ConstraintModel import ConstraintKindEnum, ConstraintModel from .uuids._Builder import get_uuid_random +LOGGER = logging.getLogger(__name__) + def compose_constraints_data( - constraints : List[Constraint], + constraints : List[Constraint], now : datetime.datetime, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None ) -> List[Dict]: dict_constraints : List[Dict] = list() @@ -33,6 +36,8 @@ def compose_constraints_data( 'position' : position, 'kind' : ConstraintKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member 'data' : grpc_message_to_json_string(getattr(constraint, str_kind, {})), + 'created_at' : now, + 'updated_at' : now, } if service_uuid is not None: dict_constraint['service_uuid'] = service_uuid if slice_uuid is not None: dict_constraint['slice_uuid' ] = slice_uuid @@ -42,13 +47,27 @@ def compose_constraints_data( def upsert_constraints( session : Session, constraints : List[Dict], service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None -) -> None: +) -> List[bool]: + # TODO: do not delete all constraints; just add-remove as needed stmt = delete(ConstraintModel) if service_uuid is not None: stmt = stmt.where(ConstraintModel.service_uuid == service_uuid) if slice_uuid is not None: stmt = stmt.where(ConstraintModel.slice_uuid == slice_uuid ) session.execute(stmt) + + constraint_updates = [] if len(constraints) > 0: - session.execute(insert(ConstraintModel).values(constraints)) + stmt = insert(ConstraintModel).values(constraints) + #stmt = stmt.on_conflict_do_update( + # index_elements=[ConstraintModel.configrule_uuid], + # set_=dict( + # updated_at = stmt.excluded.updated_at, + # ) + #) + stmt = stmt.returning(ConstraintModel.created_at, ConstraintModel.updated_at) + constraint_updates = session.execute(stmt).fetchall() + + return constraint_updates + # def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int # ) -> Tuple[Union_ConstraintModel, bool]: diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index 247914d65..a8f9f40d6 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime, logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple -from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceIdList, ServiceList +from common.proto.context_pb2 import ContextId, Service, ServiceId from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Service import json_service_id @@ -30,23 +31,23 @@ from .uuids.Context import context_get_uuid from .uuids.EndPoint import endpoint_get_uuid from .uuids.Service import service_get_uuid -def service_list_ids(db_engine : Engine, request : ContextId) -> ServiceIdList: +LOGGER = logging.getLogger(__name__) + +def service_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.service)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return ServiceIdList(service_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def service_list_objs(db_engine : Engine, request : ContextId) -> ServiceList: +def service_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.service)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return ServiceList(services=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def service_get(db_engine : Engine, request : ServiceId) -> Service: +def service_get(db_engine : Engine, request : ServiceId) -> Dict: _,service_uuid = service_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[ServiceModel] = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none() @@ -59,9 +60,9 @@ def service_get(db_engine : Engine, request : ServiceId) -> Service: 'context_uuid generated was: {:s}'.format(context_uuid), 'service_uuid generated was: {:s}'.format(service_uuid), ]) - return Service(**obj) + return obj -def service_set(db_engine : Engine, request : Service) -> Tuple[ServiceId, bool]: +def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]: raw_context_uuid = request.service_id.context_id.context_uuid.uuid raw_service_uuid = request.service_id.service_uuid.uuid raw_service_name = request.name @@ -71,6 +72,8 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[ServiceId, bool] service_type = grpc_to_enum__service_type(request.service_type) service_status = grpc_to_enum__service_status(request.service_status.service_status) + now = datetime.datetime.utcnow() + service_endpoints_data : List[Dict] = list() for i,endpoint_id in enumerate(request.service_endpoint_ids): endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid @@ -87,8 +90,8 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[ServiceId, bool] 'endpoint_uuid': endpoint_uuid, }) - constraints = compose_constraints_data(request.service_constraints, service_uuid=service_uuid) - config_rules = compose_config_rules_data(request.service_config.config_rules, service_uuid=service_uuid) + constraints = compose_constraints_data(request.service_constraints, now, service_uuid=service_uuid) + config_rules = compose_config_rules_data(request.service_config.config_rules, now, service_uuid=service_uuid) service_data = [{ 'context_uuid' : context_uuid, @@ -96,9 +99,11 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[ServiceId, bool] 'service_name' : service_name, 'service_type' : service_type, 'service_status': service_status, + 'created_at' : now, + 'updated_at' : now, }] - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(ServiceModel).values(service_data) stmt = stmt.on_conflict_do_update( index_elements=[ServiceModel.service_uuid], @@ -108,7 +113,9 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[ServiceId, bool] service_status = stmt.excluded.service_status, ) ) - session.execute(stmt) + stmt = stmt.returning(ServiceModel.created_at, ServiceModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + updated = updated_at > created_at stmt = insert(ServiceEndPointModel).values(service_endpoints_data) stmt = stmt.on_conflict_do_nothing( @@ -116,17 +123,21 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[ServiceId, bool] ) session.execute(stmt) - upsert_constraints(session, constraints, service_uuid=service_uuid) - upsert_config_rules(session, config_rules, service_uuid=service_uuid) + constraint_updates = upsert_constraints(session, constraints, service_uuid=service_uuid) + updated = updated or any([(updated_at > created_at) for created_at,updated_at in constraint_updates]) - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated - return ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))),updated + configrule_updates = upsert_config_rules(session, config_rules, service_uuid=service_uuid) + updated = updated or any([(updated_at > created_at) for created_at,updated_at in configrule_updates]) + return updated -def service_delete(db_engine : Engine, request : ServiceId) -> bool: - _,service_uuid = service_get_uuid(request, allow_random=False) + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return json_service_id(service_uuid, json_context_id(context_uuid)),updated + +def service_delete(db_engine : Engine, request : ServiceId) -> Tuple[Dict, bool]: + context_uuid,service_uuid = service_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(ServiceModel).filter_by(service_uuid=service_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return json_service_id(service_uuid, json_context_id(context_uuid)),deleted diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index e963fb772..f255968b2 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime, logging from sqlalchemy import and_ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Set, Tuple -from common.proto.context_pb2 import ContextId, Slice, SliceId, SliceIdList, SliceList +from common.proto.context_pb2 import ContextId, Slice, SliceId from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Slice import json_slice_id @@ -31,23 +32,23 @@ from .uuids.EndPoint import endpoint_get_uuid from .uuids.Service import service_get_uuid from .uuids.Slice import slice_get_uuid -def slice_list_ids(db_engine : Engine, request : ContextId) -> SliceIdList: +LOGGER = logging.getLogger(__name__) + +def slice_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.slice)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return SliceIdList(slice_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def slice_list_objs(db_engine : Engine, request : ContextId) -> SliceList: +def slice_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]: context_uuid = context_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all() - #.options(selectinload(ContextModel.slice)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return SliceList(slices=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def slice_get(db_engine : Engine, request : SliceId) -> Slice: +def slice_get(db_engine : Engine, request : SliceId) -> Dict: _,slice_uuid = slice_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[SliceModel] = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).one_or_none() @@ -60,9 +61,9 @@ def slice_get(db_engine : Engine, request : SliceId) -> Slice: 'context_uuid generated was: {:s}'.format(context_uuid), 'slice_uuid generated was: {:s}'.format(slice_uuid), ]) - return Slice(**obj) + return obj -def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: +def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: raw_context_uuid = request.slice_id.context_id.context_uuid.uuid raw_slice_uuid = request.slice_id.slice_uuid.uuid raw_slice_name = request.name @@ -71,6 +72,8 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: slice_status = grpc_to_enum__slice_status(request.slice_status.slice_status) + now = datetime.datetime.utcnow() + slice_endpoints_data : List[Dict] = list() for i,endpoint_id in enumerate(request.slice_endpoint_ids): endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid @@ -103,8 +106,8 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: 'subslice_uuid': subslice_uuid, }) - constraints = compose_constraints_data(request.slice_constraints, slice_uuid=slice_uuid) - config_rules = compose_config_rules_data(request.slice_config.config_rules, slice_uuid=slice_uuid) + constraints = compose_constraints_data(request.slice_constraints, now, slice_uuid=slice_uuid) + config_rules = compose_config_rules_data(request.slice_config.config_rules, now, slice_uuid=slice_uuid) slice_data = [{ 'context_uuid' : context_uuid, @@ -113,9 +116,11 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: 'slice_status' : slice_status, 'slice_owner_uuid' : request.slice_owner.owner_uuid.uuid, 'slice_owner_string': request.slice_owner.owner_string, + 'created_at' : now, + 'updated_at' : now, }] - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(SliceModel).values(slice_data) stmt = stmt.on_conflict_do_update( index_elements=[SliceModel.slice_uuid], @@ -126,7 +131,9 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: slice_owner_string = stmt.excluded.slice_owner_string, ) ) - session.execute(stmt) + stmt = stmt.returning(SliceModel.created_at, SliceModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + updated = updated_at > created_at if len(slice_endpoints_data) > 0: stmt = insert(SliceEndPointModel).values(slice_endpoints_data) @@ -149,14 +156,18 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: ) session.execute(stmt) - upsert_constraints(session, constraints, slice_uuid=slice_uuid) - upsert_config_rules(session, config_rules, slice_uuid=slice_uuid) + constraint_updates = upsert_constraints(session, constraints, slice_uuid=slice_uuid) + updated = updated or any([(updated_at > created_at) for created_at,updated_at in constraint_updates]) - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated - return SliceId(**json_slice_id(slice_uuid, json_context_id(context_uuid))),updated + configrule_updates = upsert_config_rules(session, config_rules, slice_uuid=slice_uuid) + updated = updated or any([(updated_at > created_at) for created_at,updated_at in configrule_updates]) -def slice_unset(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: + return updated + + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated + +def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: raw_context_uuid = request.slice_id.context_id.context_uuid.uuid raw_slice_uuid = request.slice_id.slice_uuid.uuid raw_slice_name = request.name @@ -208,11 +219,12 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[SliceId, bool]: return num_deletes > 0 updated = run_transaction(sessionmaker(bind=db_engine), callback) - return SliceId(**json_slice_id(slice_uuid, json_context_id(context_uuid))),updated + return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated -def slice_delete(db_engine : Engine, request : SliceId) -> bool: - _,slice_uuid = slice_get_uuid(request, allow_random=False) +def slice_delete(db_engine : Engine, request : SliceId) -> Tuple[Dict, bool]: + context_uuid,slice_uuid = slice_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return json_slice_id(slice_uuid, json_context_id(context_uuid)),deleted diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index a697de556..c2305b001 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -35,8 +35,8 @@ class ConfigRuleModel(_Base): kind = Column(Enum(ConfigRuleKindEnum), nullable=False) action = Column(Enum(ORM_ConfigActionEnum), nullable=False) data = Column(String, nullable=False) - created_at = Column(DateTime) - updated_at = Column(DateTime) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py index 30ade508e..51fc0b91d 100644 --- a/src/context/service/database/models/ConstraintModel.py +++ b/src/context/service/database/models/ConstraintModel.py @@ -13,7 +13,7 @@ # limitations under the License. import enum, json -from sqlalchemy import CheckConstraint, Column, Enum, ForeignKey, Integer, String +from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String from sqlalchemy.dialects.postgresql import UUID from typing import Dict from ._Base import _Base @@ -35,6 +35,8 @@ class ConstraintModel(_Base): position = Column(Integer, nullable=False) kind = Column(Enum(ConstraintKindEnum), nullable=False) data = Column(String, nullable=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) __table_args__ = ( CheckConstraint(position >= 0, name='check_position_value'), diff --git a/src/context/service/database/models/ContextModel.py b/src/context/service/database/models/ContextModel.py index fee0f72a5..26ccd8c60 100644 --- a/src/context/service/database/models/ContextModel.py +++ b/src/context/service/database/models/ContextModel.py @@ -23,8 +23,8 @@ class ContextModel(_Base): context_uuid = Column(UUID(as_uuid=False), primary_key=True) context_name = Column(String, nullable=False) - created_at = Column(DateTime) - updated_at = Column(DateTime) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) topologies = relationship('TopologyModel', back_populates='context') services = relationship('ServiceModel', back_populates='context') diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py index ef56c7158..d73cec75d 100644 --- a/src/context/service/database/models/DeviceModel.py +++ b/src/context/service/database/models/DeviceModel.py @@ -29,8 +29,8 @@ class DeviceModel(_Base): device_type = Column(String, nullable=False) device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum), nullable=False) device_drivers = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1)) - created_at = Column(DateTime) - updated_at = Column(DateTime) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) #topology_devices = relationship('TopologyDeviceModel', back_populates='device') config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device' diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py index abc16c1af..07a5df2bf 100644 --- a/src/context/service/database/models/EndPointModel.py +++ b/src/context/service/database/models/EndPointModel.py @@ -28,8 +28,8 @@ class EndPointModel(_Base): name = Column(String, nullable=False) endpoint_type = Column(String, nullable=False) kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1)) - created_at = Column(DateTime) - updated_at = Column(DateTime) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) device = relationship('DeviceModel', back_populates='endpoints') topology = relationship('TopologyModel') diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index a13f61bf3..abf37a28a 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -23,8 +23,8 @@ class LinkModel(_Base): link_uuid = Column(UUID(as_uuid=False), primary_key=True) link_name = Column(String, nullable=False) - created_at = Column(DateTime) - updated_at = Column(DateTime) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) #topology_links = relationship('TopologyLinkModel', back_populates='link') link_endpoints = relationship('LinkEndPointModel') # lazy='joined', back_populates='link' diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py index 7343b5ade..1a28dbce2 100644 --- a/src/context/service/database/models/ServiceModel.py +++ b/src/context/service/database/models/ServiceModel.py @@ -13,7 +13,7 @@ # limitations under the License. import operator -from sqlalchemy import Column, Enum, ForeignKey, String +from sqlalchemy import Column, DateTime, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict @@ -29,6 +29,8 @@ class ServiceModel(_Base): service_name = Column(String, nullable=False) service_type = Column(Enum(ORM_ServiceTypeEnum), nullable=False) service_status = Column(Enum(ORM_ServiceStatusEnum), nullable=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) context = relationship('ContextModel', back_populates='services') service_endpoints = relationship('ServiceEndPointModel') # lazy='joined', back_populates='service' diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py index d3dff51e1..5c9ebafa4 100644 --- a/src/context/service/database/models/SliceModel.py +++ b/src/context/service/database/models/SliceModel.py @@ -13,7 +13,7 @@ # limitations under the License. import operator -from sqlalchemy import Column, Enum, ForeignKey, String, Table +from sqlalchemy import Column, DateTime, Enum, ForeignKey, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict @@ -29,6 +29,8 @@ class SliceModel(_Base): slice_status = Column(Enum(ORM_SliceStatusEnum), nullable=False) slice_owner_uuid = Column(String, nullable=True) slice_owner_string = Column(String, nullable=True) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) context = relationship('ContextModel', back_populates='slices') slice_endpoints = relationship('SliceEndPointModel') # lazy='joined', back_populates='slice' diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py index d4dbe173e..59659ecd3 100644 --- a/src/context/service/database/models/TopologyModel.py +++ b/src/context/service/database/models/TopologyModel.py @@ -24,8 +24,8 @@ class TopologyModel(_Base): topology_uuid = Column(UUID(as_uuid=False), primary_key=True) context_uuid = Column(ForeignKey('context.context_uuid'), nullable=False) topology_name = Column(String, nullable=False) - created_at = Column(DateTime) - updated_at = Column(DateTime) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) context = relationship('ContextModel', back_populates='topologies') topology_devices = relationship('TopologyDeviceModel') # back_populates='topology' diff --git a/src/context/tests/test_service.py b/src/context/tests/test_service.py index ca81bbfa3..e80437dbb 100644 --- a/src/context/tests/test_service.py +++ b/src/context/tests/test_service.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest +import copy, grpc, pytest, time from common.proto.context_pb2 import ( - Context, ContextId, Device, DeviceId, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyId) + Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Service, ServiceEvent, ServiceId, + ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient from context.service.database.uuids.Service import service_get_uuid -#from context.client.EventsCollector import EventsCollector +from context.client.EventsCollector import EventsCollector from .Objects import ( CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, SERVICE_R1_R2_NAME, DEVICE_R2, DEVICE_R2_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, TOPOLOGY, TOPOLOGY_ID) @@ -26,33 +27,42 @@ from .Objects import ( def test_service(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, - # activate_link_collector = False, activate_service_collector = True, activate_slice_collector = False, - # activate_connection_collector = False) - #events_collector.start() + events_collector = EventsCollector( + context_client, log_events_received=True, + activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + activate_link_collector = True, activate_service_collector = True, activate_slice_collector = False, + activate_connection_collector = False) + events_collector.start() + time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - context_client.SetContext(Context(**CONTEXT)) - context_client.SetTopology(Topology(**TOPOLOGY)) - context_client.SetDevice(Device(**DEVICE_R1)) - context_client.SetDevice(Device(**DEVICE_R2)) - - # events = events_collector.get_events(block=True, count=4) - # assert isinstance(events[0], ContextEvent) - # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[0].context_id.context_uuid.uuid == context_uuid - # assert isinstance(events[1], TopologyEvent) - # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - # assert events[1].topology_id.topology_uuid.uuid == topology_uuid - # assert isinstance(events[2], DeviceEvent) - # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[2].device_id.device_uuid.uuid == device_r1_uuid - # assert isinstance(events[3], DeviceEvent) - # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - # assert events[3].device_id.device_uuid.uuid == device_r2_uuid + response = context_client.SetContext(Context(**CONTEXT)) + context_uuid = response.context_uuid.uuid + + response = context_client.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == context_uuid + topology_uuid = response.topology_uuid.uuid + + response = context_client.SetDevice(Device(**DEVICE_R1)) + device_r1_uuid = response.device_uuid.uuid + + response = context_client.SetDevice(Device(**DEVICE_R2)) + device_r2_uuid = response.device_uuid.uuid + + events = events_collector.get_events(block=True, count=4, timeout=1.0) + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == context_uuid + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[1].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[2].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[3], DeviceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[3].device_id.device_uuid.uuid == device_r2_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- service_id = ServiceId(**SERVICE_R1_R2_ID) @@ -92,11 +102,11 @@ def test_service(context_client : ContextClient) -> None: assert response.service_uuid.uuid == service_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, ServiceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.service_id.context_id.context_uuid.uuid == context_uuid - #assert event.service_id.service_uuid.uuid == service_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ServiceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.service_id.context_id.context_uuid.uuid == context_uuid + assert event.service_id.service_uuid.uuid == service_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -145,11 +155,11 @@ def test_service(context_client : ContextClient) -> None: assert response.service_uuid.uuid == service_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, ServiceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.service_id.context_id.context_uuid.uuid == context_uuid - #assert event.service_id.service_uuid.uuid == service_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ServiceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.service_id.context_id.context_uuid.uuid == context_uuid + assert event.service_id.service_uuid.uuid == service_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetService(ServiceId(**SERVICE_R1_R2_ID)) @@ -183,11 +193,11 @@ def test_service(context_client : ContextClient) -> None: context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, ServiceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.service_id.context_id.context_uuid.uuid == context_uuid - #assert event.service_id.service_uuid.uuid == service_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, ServiceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.service_id.context_id.context_uuid.uuid == context_uuid + assert event.service_id.service_uuid.uuid == service_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -207,20 +217,20 @@ def test_service(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - #events = events_collector.get_events(block=True, count=4) - #assert isinstance(events[0], DeviceEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[0].device_id.device_uuid.uuid == device_r1_uuid - #assert isinstance(events[1], DeviceEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[1].device_id.device_uuid.uuid == device_r2_uuid - #assert isinstance(events[2], TopologyEvent) - #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[2].topology_id.topology_uuid.uuid == topology_uuid - #assert isinstance(events[3], ContextEvent) - #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[3].context_id.context_uuid.uuid == context_uuid + events = events_collector.get_events(block=True, count=4, timeout=1.0) + assert isinstance(events[0], DeviceEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[0].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[1], DeviceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].device_id.device_uuid.uuid == device_r2_uuid + assert isinstance(events[2], TopologyEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[2].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[3], ContextEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + events_collector.stop() diff --git a/src/context/tests/test_slice.py b/src/context/tests/test_slice.py index 9d27523b1..cb7eb7737 100644 --- a/src/context/tests/test_slice.py +++ b/src/context/tests/test_slice.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest +import copy, grpc, pytest, time from common.proto.context_pb2 import ( - Context, ContextId, Device, DeviceId, Link, LinkId, Service, ServiceId, Slice, SliceId, SliceStatusEnum, Topology, + Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, Slice, SliceEvent, SliceId, SliceStatusEnum, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient from context.service.database.uuids.Slice import slice_get_uuid -#from context.client.EventsCollector import EventsCollector +from context.client.EventsCollector import EventsCollector from .Objects import ( CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, @@ -28,57 +28,82 @@ from .Objects import ( def test_slice(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = True, - # activate_connection_collector = False) - #events_collector.start() + events_collector = EventsCollector( + context_client, log_events_received=True, + activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True, + activate_connection_collector = False) + events_collector.start() + time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- - context_client.SetContext(Context(**CONTEXT)) - context_client.SetTopology(Topology(**TOPOLOGY)) - context_client.SetDevice(Device(**DEVICE_R1)) - context_client.SetDevice(Device(**DEVICE_R2)) - context_client.SetDevice(Device(**DEVICE_R3)) - context_client.SetLink(Link(**LINK_R1_R2)) - context_client.SetLink(Link(**LINK_R1_R3)) - context_client.SetLink(Link(**LINK_R2_R3)) - context_client.SetService(Service(**SERVICE_R1_R2)) - context_client.SetService(Service(**SERVICE_R2_R3)) - - #events = events_collector.get_events(block=True, count=10) - #assert isinstance(events[0], ContextEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[0].context_id.context_uuid.uuid == context_uuid - #assert isinstance(events[1], TopologyEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[1].topology_id.topology_uuid.uuid == topology_uuid - #assert isinstance(events[2], DeviceEvent) - #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[2].device_id.device_uuid.uuid == device_r1_uuid - #assert isinstance(events[3], DeviceEvent) - #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[3].device_id.device_uuid.uuid == device_r2_uuid - #assert isinstance(events[4], DeviceEvent) - #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[4].device_id.device_uuid.uuid == device_r3_uuid - #assert isinstance(events[5], LinkEvent) - #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[5].link_id.link_uuid.uuid == link_r1_r2_uuid - #assert isinstance(events[6], LinkEvent) - #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[6].link_id.link_uuid.uuid == link_r1_r3_uuid - #assert isinstance(events[7], LinkEvent) - #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[7].link_id.link_uuid.uuid == link_r2_r3_uuid - #assert isinstance(events[8], ServiceEvent) - #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[8].service_id.service_uuid.uuid == service_r1_r2_uuid - #assert isinstance(events[9], ServiceEvent) - #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[9].service_id.service_uuid.uuid == service_r2_r3_uuid + response = context_client.SetContext(Context(**CONTEXT)) + context_uuid = response.context_uuid.uuid + + response = context_client.SetTopology(Topology(**TOPOLOGY)) + assert response.context_id.context_uuid.uuid == context_uuid + topology_uuid = response.topology_uuid.uuid + + response = context_client.SetDevice(Device(**DEVICE_R1)) + device_r1_uuid = response.device_uuid.uuid + + response = context_client.SetDevice(Device(**DEVICE_R2)) + device_r2_uuid = response.device_uuid.uuid + + response = context_client.SetDevice(Device(**DEVICE_R3)) + device_r3_uuid = response.device_uuid.uuid + + response = context_client.SetLink(Link(**LINK_R1_R2)) + link_r1_r2_uuid = response.link_uuid.uuid + + response = context_client.SetLink(Link(**LINK_R1_R3)) + link_r1_r3_uuid = response.link_uuid.uuid + + response = context_client.SetLink(Link(**LINK_R2_R3)) + link_r2_r3_uuid = response.link_uuid.uuid + + response = context_client.SetService(Service(**SERVICE_R1_R2)) + assert response.context_id.context_uuid.uuid == context_uuid + service_r1_r2_uuid = response.service_uuid.uuid + + response = context_client.SetService(Service(**SERVICE_R2_R3)) + assert response.context_id.context_uuid.uuid == context_uuid + service_r2_r3_uuid = response.service_uuid.uuid + + events = events_collector.get_events(block=True, count=10, timeout=1.0) + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == context_uuid + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[1].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[2].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[3], DeviceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[3].device_id.device_uuid.uuid == device_r2_uuid + assert isinstance(events[4], DeviceEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[4].device_id.device_uuid.uuid == device_r3_uuid + assert isinstance(events[5], LinkEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[5].link_id.link_uuid.uuid == link_r1_r2_uuid + assert isinstance(events[6], LinkEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[6].link_id.link_uuid.uuid == link_r1_r3_uuid + assert isinstance(events[7], LinkEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[7].link_id.link_uuid.uuid == link_r2_r3_uuid + assert isinstance(events[8], ServiceEvent) + assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[8].service_id.context_id.context_uuid.uuid == context_uuid + assert events[8].service_id.service_uuid.uuid == service_r1_r2_uuid + assert isinstance(events[9], ServiceEvent) + assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[9].service_id.context_id.context_uuid.uuid == context_uuid + assert events[9].service_id.service_uuid.uuid == service_r2_r3_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- slice_id = SliceId(**SLICE_R1_R3_ID) @@ -118,11 +143,11 @@ def test_slice(context_client : ContextClient) -> None: assert response.slice_uuid.uuid == slice_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, SliceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.slice_id.context_id.context_uuid.uuid == context_uuid - #assert event.slice_id.slice_uuid.uuid == slice_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, SliceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.slice_id.context_id.context_uuid.uuid == context_uuid + assert event.slice_id.slice_uuid.uuid == slice_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -169,11 +194,11 @@ def test_slice(context_client : ContextClient) -> None: assert response.slice_uuid.uuid == slice_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, SliceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.slice_id.context_id.context_uuid.uuid == context_uuid - #assert event.slice_id.slice_uuid.uuid == slice_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, SliceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.slice_id.context_id.context_uuid.uuid == context_uuid + assert event.slice_id.slice_uuid.uuid == slice_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetSlice(SliceId(**SLICE_R1_R3_ID)) @@ -205,11 +230,11 @@ def test_slice(context_client : ContextClient) -> None: context_client.RemoveSlice(SliceId(**SLICE_R1_R3_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, SliceEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.slice_id.context_id.context_uuid.uuid == context_uuid - #assert event.slice_id.slice_uuid.uuid == slice_uuid + event = events_collector.get_event(block=True, timeout=1.0) + assert isinstance(event, SliceEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.slice_id.context_id.context_uuid.uuid == context_uuid + assert event.slice_id.slice_uuid.uuid == slice_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -235,38 +260,40 @@ def test_slice(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - #events = events_collector.get_events(block=True, count=10) - #assert isinstance(events[0], ServiceEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[0].service_id.service_uuid.uuid == service_r1_r2_uuid - #assert isinstance(events[1], ServiceEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid - #assert isinstance(events[2], LinkEvent) - #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[2].link_id.link_uuid.uuid == link_r1_r2_uuid - #assert isinstance(events[3], LinkEvent) - #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[3].link_id.link_uuid.uuid == link_r1_r3_uuid - #assert isinstance(events[4], LinkEvent) - #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[4].link_id.link_uuid.uuid == link_r2_r3_uuid - #assert isinstance(events[5], DeviceEvent) - #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[5].device_id.device_uuid.uuid == device_r1_uuid - #assert isinstance(events[6], DeviceEvent) - #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[6].device_id.device_uuid.uuid == device_r2_uuid - #assert isinstance(events[7], DeviceEvent) - #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[7].device_id.device_uuid.uuid == device_r3_uuid - #assert isinstance(events[8], TopologyEvent) - #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[8].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[8].topology_id.topology_uuid.uuid == topology_uuid - #assert isinstance(events[9], ContextEvent) - #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[9].context_id.context_uuid.uuid == context_uuid + events = events_collector.get_events(block=True, count=10) + assert isinstance(events[0], ServiceEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[0].service_id.context_id.context_uuid.uuid == context_uuid + assert events[0].service_id.service_uuid.uuid == service_r1_r2_uuid + assert isinstance(events[1], ServiceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].service_id.context_id.context_uuid.uuid == context_uuid + assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid + assert isinstance(events[2], LinkEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].link_id.link_uuid.uuid == link_r1_r2_uuid + assert isinstance(events[3], LinkEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].link_id.link_uuid.uuid == link_r1_r3_uuid + assert isinstance(events[4], LinkEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[4].link_id.link_uuid.uuid == link_r2_r3_uuid + assert isinstance(events[5], DeviceEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[5].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[6], DeviceEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[6].device_id.device_uuid.uuid == device_r2_uuid + assert isinstance(events[7], DeviceEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[7].device_id.device_uuid.uuid == device_r3_uuid + assert isinstance(events[8], TopologyEvent) + assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[8].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[8].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[9], ContextEvent) + assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[9].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + events_collector.stop() -- GitLab From b8ec2a6acf18a37f03da983f3f721021b3ead521 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 18:08:06 +0000 Subject: [PATCH 055/158] Context: - cosmetic changes --- src/context/service/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py index f15c8fde0..63e9c611c 100644 --- a/src/context/service/__main__.py +++ b/src/context/service/__main__.py @@ -30,7 +30,7 @@ LOGGER = logging.getLogger(__name__) terminate = threading.Event() -def signal_handler(signal, frame): # pylint: disable=redefined-outer-name +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name,unused-argument LOGGER.warning('Terminate signal received') terminate.set() -- GitLab From 59af354425c1a29b809b5c352be9ee49865f59e8 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 13 Jan 2023 18:27:49 +0000 Subject: [PATCH 056/158] Context: - configured constant with event collection timeout for unitary tests and debug purposes - cosmetic changes - migrated event reporting for Connection entity --- .../service/ContextServiceServicerImpl.py | 22 +-- src/context/service/database/Connection.py | 49 +++-- .../database/models/ConnectionModel.py | 4 +- src/context/tests/test_connection.py | 180 +++++++++--------- src/context/tests/test_context.py | 8 +- src/context/tests/test_device.py | 12 +- src/context/tests/test_link.py | 12 +- src/context/tests/test_service.py | 12 +- src/context/tests/test_slice.py | 14 +- src/context/tests/test_topology.py | 12 +- 10 files changed, 173 insertions(+), 152 deletions(-) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 34608d619..f5b2662b3 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -278,29 +278,29 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList: - return connection_list_ids(self.db_engine, request) + return ConnectionIdList(connection_ids=connection_list_ids(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListConnections(self, request : ContextId, context : grpc.ServicerContext) -> ConnectionList: - return connection_list_objs(self.db_engine, request) + return ConnectionList(connections=connection_list_objs(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection: - return connection_get(self.db_engine, request) + return Connection(**connection_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId: - connection_id,updated = connection_set(self.db_engine, request) # pylint: disable=unused-variable - #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE - #notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id}) - return connection_id + connection_id,updated = connection_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id}) + return ConnectionId(**connection_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty: - deleted = connection_delete(self.db_engine, request) # pylint: disable=unused-variable - #if deleted: - # event_type = EventTypeEnum.EVENTTYPE_REMOVE - # notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': request}) + connection_id,deleted = connection_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id}) return Empty() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py index 2f6fb8433..f1616e96e 100644 --- a/src/context/service/database/Connection.py +++ b/src/context/service/database/Connection.py @@ -19,7 +19,7 @@ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, List, Optional, Tuple -from common.proto.context_pb2 import Connection, ConnectionId, ConnectionIdList, ConnectionList, ServiceId +from common.proto.context_pb2 import Connection, ConnectionId, ServiceId from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Connection import json_connection_id @@ -28,23 +28,23 @@ from .uuids.Connection import connection_get_uuid from .uuids.EndPoint import endpoint_get_uuid from .uuids.Service import service_get_uuid -def connection_list_ids(db_engine : Engine, request : ServiceId) -> ConnectionIdList: +LOGGER = logging.getLogger(__name__) + +def connection_list_ids(db_engine : Engine, request : ServiceId) -> List[Dict]: _,service_uuid = service_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all() - #.options(selectinload(ContextModel.connection)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return ConnectionIdList(connection_ids=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def connection_list_objs(db_engine : Engine, request : ServiceId) -> ConnectionList: +def connection_list_objs(db_engine : Engine, request : ServiceId) -> List[Dict]: _,service_uuid = service_get_uuid(request, allow_random=False) def callback(session : Session) -> List[Dict]: obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all() - #.options(selectinload(ContextModel.connection)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return ConnectionList(connections=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def connection_get(db_engine : Engine, request : ConnectionId) -> Connection: +def connection_get(db_engine : Engine, request : ConnectionId) -> Dict: connection_uuid = connection_get_uuid(request, allow_random=False) def callback(session : Session) -> Optional[Dict]: obj : Optional[ConnectionModel] = session.query(ConnectionModel)\ @@ -55,17 +55,21 @@ def connection_get(db_engine : Engine, request : ConnectionId) -> Connection: raise NotFoundException('Connection', request.connection_uuid.uuid, extra_details=[ 'connection_uuid generated was: {:s}'.format(connection_uuid), ]) - return Connection(**obj) + return obj -def connection_set(db_engine : Engine, request : Connection) -> Tuple[ConnectionId, bool]: +def connection_set(db_engine : Engine, request : Connection) -> Tuple[Dict, bool]: connection_uuid = connection_get_uuid(request.connection_id, allow_random=True) _,service_uuid = service_get_uuid(request.service_id, allow_random=False) settings = grpc_message_to_json_string(request.settings), + now = datetime.datetime.utcnow() + connection_data = [{ 'connection_uuid': connection_uuid, 'service_uuid' : service_uuid, 'settings' : settings, + 'created_at' : now, + 'updated_at' : now, }] connection_endpoints_data : List[Dict] = list() @@ -78,21 +82,27 @@ def connection_set(db_engine : Engine, request : Connection) -> Tuple[Connection }) connection_subservices_data : List[Dict] = list() - for i,service_id in enumerate(request.sub_service_ids): + for service_id in request.sub_service_ids: _, service_uuid = service_get_uuid(service_id, allow_random=False) connection_subservices_data.append({ 'connection_uuid': connection_uuid, 'subservice_uuid': service_uuid, }) - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(ConnectionModel).values(connection_data) stmt = stmt.on_conflict_do_update( index_elements=[ConnectionModel.connection_uuid], - set_=dict(settings = stmt.excluded.settings) + set_=dict( + settings = stmt.excluded.settings, + updated_at = stmt.excluded.updated_at, + ) ) - session.execute(stmt) + stmt = stmt.returning(ConnectionModel.created_at, ConnectionModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + updated = updated_at > created_at + # TODO: manage update connection endpoints if len(connection_endpoints_data) > 0: stmt = insert(ConnectionEndPointModel).values(connection_endpoints_data) stmt = stmt.on_conflict_do_nothing( @@ -115,6 +125,7 @@ def connection_set(db_engine : Engine, request : Connection) -> Tuple[Connection else: raise + # TODO: manage update connection subservices if len(connection_subservices_data) > 0: stmt = insert(ConnectionSubServiceModel).values(connection_subservices_data) stmt = stmt.on_conflict_do_nothing( @@ -122,13 +133,15 @@ def connection_set(db_engine : Engine, request : Connection) -> Tuple[Connection ) session.execute(stmt) - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated + return updated + + updated = run_transaction(sessionmaker(bind=db_engine), callback) return ConnectionId(**json_connection_id(connection_uuid)),updated -def connection_delete(db_engine : Engine, request : ConnectionId) -> bool: +def connection_delete(db_engine : Engine, request : ConnectionId) -> Tuple[Dict, bool]: connection_uuid = connection_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(ConnectionModel).filter_by(connection_uuid=connection_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return ConnectionId(**json_connection_id(connection_uuid)),deleted diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py index a1d45a934..966dcab4d 100644 --- a/src/context/service/database/models/ConnectionModel.py +++ b/src/context/service/database/models/ConnectionModel.py @@ -13,7 +13,7 @@ # limitations under the License. import json, logging, operator -from sqlalchemy import Column, ForeignKey, Integer, CheckConstraint, String +from sqlalchemy import Column, DateTime, ForeignKey, Integer, CheckConstraint, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict @@ -27,6 +27,8 @@ class ConnectionModel(_Base): connection_uuid = Column(UUID(as_uuid=False), primary_key=True) service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False) settings = Column(String, nullable=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) connection_service = relationship('ServiceModel') # back_populates='connections' connection_endpoints = relationship('ConnectionEndPointModel') # lazy='joined', back_populates='connection' diff --git a/src/context/tests/test_connection.py b/src/context/tests/test_connection.py index 4cc5407b4..909ddb6ef 100644 --- a/src/context/tests/test_connection.py +++ b/src/context/tests/test_connection.py @@ -12,28 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest +import copy, grpc, pytest, time from common.proto.context_pb2 import ( - Connection, ConnectionId, Context, ContextId, Device, DeviceId, EndPointId, Service, ServiceId, Topology, TopologyId) + Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPointId, EventTypeEnum, Service, ServiceEvent, ServiceId, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient from context.service.database.uuids.Connection import connection_get_uuid from context.service.database.uuids.EndPoint import endpoint_get_uuid -#from context.client.EventsCollector import EventsCollector +from context.client.EventsCollector import EventsCollector from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_NAME, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R3, SERVICE_R1_R3_ID, SERVICE_R2_R3, SERVICE_R2_R3_ID, TOPOLOGY, TOPOLOGY_ID) +GET_EVENTS_TIMEOUT = 10.0 + @pytest.mark.depends(on=['context/tests/test_service.py::test_service', 'context/tests/test_slice.py::test_slice']) def test_connection(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - #events_collector = EventsCollector( - # context_client, log_events_received=True, - # activate_context_collector = False, activate_topology_collector = False, activate_device_collector = False, - # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - # activate_connection_collector = True) - #events_collector.start() + events_collector = EventsCollector( + context_client, log_events_received=True, + activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True, + activate_connection_collector = True) + events_collector.start() + time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -47,61 +50,52 @@ def test_connection(context_client : ContextClient) -> None: device_r1_uuid = response.device_uuid.uuid response = context_client.SetDevice(Device(**DEVICE_R2)) - device_r2_uuid = response.device_uuid.uuid # pylint: disable=unused-variable + device_r2_uuid = response.device_uuid.uuid response = context_client.SetDevice(Device(**DEVICE_R3)) - device_r3_uuid = response.device_uuid.uuid # pylint: disable=unused-variable + device_r3_uuid = response.device_uuid.uuid response = context_client.SetService(Service(**SERVICE_R1_R2)) assert response.context_id.context_uuid.uuid == context_uuid - service_r1_r2_uuid = response.service_uuid.uuid # pylint: disable=unused-variable + service_r1_r2_uuid = response.service_uuid.uuid response = context_client.SetService(Service(**SERVICE_R2_R3)) assert response.context_id.context_uuid.uuid == context_uuid - service_r2_r3_uuid = response.service_uuid.uuid # pylint: disable=unused-variable + service_r2_r3_uuid = response.service_uuid.uuid response = context_client.SetService(Service(**SERVICE_R1_R3)) assert response.context_id.context_uuid.uuid == context_uuid service_r1_r3_uuid = response.service_uuid.uuid - #events = events_collector.get_events(block=True, count=8) - #assert isinstance(events[0], ContextEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[0].context_id.context_uuid.uuid == context_uuid - #assert isinstance(events[1], TopologyEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[1].topology_id.topology_uuid.uuid == topology_uuid - #assert isinstance(events[2], DeviceEvent) - #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[2].device_id.device_uuid.uuid == device_r1_uuid - #assert isinstance(events[3], DeviceEvent) - #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[3].device_id.device_uuid.uuid == device_r2_uuid - #assert isinstance(events[4], DeviceEvent) - #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[4].device_id.device_uuid.uuid == device_r3_uuid - #assert isinstance(events[5], ServiceEvent) - #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[5].service_id.context_id.context_uuid.uuid == context_uuid - #assert events[5].service_id.service_uuid.uuid == service_r1_r2_uuid - #assert isinstance(events[6], ContextEvent) - #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert events[6].context_id.context_uuid.uuid == context_uuid - #assert isinstance(events[7], ServiceEvent) - #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[7].service_id.context_id.context_uuid.uuid == context_uuid - #assert events[7].service_id.service_uuid.uuid == service_r2_r3_uuid - #assert isinstance(events[8], ContextEvent) - #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert events[8].context_id.context_uuid.uuid == context_uuid - #assert isinstance(events[9], ServiceEvent) - #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert events[9].service_id.context_id.context_uuid.uuid == context_uuid - #assert events[9].service_id.service_uuid.uuid == service_r1_r3_uuid - #assert isinstance(events[10], ContextEvent) - #assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert events[10].context_id.context_uuid.uuid == context_uuid + events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT) + assert isinstance(events[0], ContextEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[0].context_id.context_uuid.uuid == context_uuid + assert isinstance(events[1], TopologyEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[1].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[2], DeviceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[2].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[3], DeviceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[3].device_id.device_uuid.uuid == device_r2_uuid + assert isinstance(events[4], DeviceEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[4].device_id.device_uuid.uuid == device_r3_uuid + assert isinstance(events[5], ServiceEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[5].service_id.context_id.context_uuid.uuid == context_uuid + assert events[5].service_id.service_uuid.uuid == service_r1_r2_uuid + assert isinstance(events[6], ServiceEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[6].service_id.context_id.context_uuid.uuid == context_uuid + assert events[6].service_id.service_uuid.uuid == service_r2_r3_uuid + assert isinstance(events[7], ServiceEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert events[7].service_id.context_id.context_uuid.uuid == context_uuid + assert events[7].service_id.service_uuid.uuid == service_r1_r3_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- connection_id = ConnectionId(**CONNECTION_R1_R3_ID) @@ -137,10 +131,10 @@ def test_connection(context_client : ContextClient) -> None: connection_r1_r3_uuid = response.connection_uuid.uuid # ----- Check create event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, ConnectionEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + assert isinstance(event, ConnectionEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) @@ -167,10 +161,10 @@ def test_connection(context_client : ContextClient) -> None: assert response.connection_uuid.uuid == connection_r1_r3_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, ConnectionEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + assert isinstance(event, ConnectionEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) @@ -195,10 +189,10 @@ def test_connection(context_client : ContextClient) -> None: context_client.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - #event = events_collector.get_event(block=True) - #assert isinstance(event, ConnectionEvent) - #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + assert isinstance(event, ConnectionEvent) + assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) @@ -217,35 +211,35 @@ def test_connection(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - #events = events_collector.get_events(block=True, count=8) - #assert isinstance(events[0], ServiceEvent) - #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[0].service_id.context_id.context_uuid.uuid == context_uuid - #assert events[0].service_id.service_uuid.uuid == service_r1_r3_uuid - #assert isinstance(events[1], ServiceEvent) - #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[1].service_id.context_id.context_uuid.uuid == context_uuid - #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid - #assert isinstance(events[2], ServiceEvent) - #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[2].service_id.context_id.context_uuid.uuid == context_uuid - #assert events[2].service_id.service_uuid.uuid == service_r1_r2_uuid - #assert isinstance(events[3], DeviceEvent) - #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[3].device_id.device_uuid.uuid == device_r1_uuid - #assert isinstance(events[4], DeviceEvent) - #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[4].device_id.device_uuid.uuid == device_r2_uuid - #assert isinstance(events[5], DeviceEvent) - #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[5].device_id.device_uuid.uuid == device_r3_uuid - #assert isinstance(events[6], TopologyEvent) - #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[6].topology_id.context_id.context_uuid.uuid == context_uuid - #assert events[6].topology_id.topology_uuid.uuid == topology_uuid - #assert isinstance(events[7], ContextEvent) - #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - #assert events[7].context_id.context_uuid.uuid == context_uuid + events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT) + assert isinstance(events[0], ServiceEvent) + assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[0].service_id.context_id.context_uuid.uuid == context_uuid + assert events[0].service_id.service_uuid.uuid == service_r1_r3_uuid + assert isinstance(events[1], ServiceEvent) + assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[1].service_id.context_id.context_uuid.uuid == context_uuid + assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid + assert isinstance(events[2], ServiceEvent) + assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[2].service_id.context_id.context_uuid.uuid == context_uuid + assert events[2].service_id.service_uuid.uuid == service_r1_r2_uuid + assert isinstance(events[3], DeviceEvent) + assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[3].device_id.device_uuid.uuid == device_r1_uuid + assert isinstance(events[4], DeviceEvent) + assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[4].device_id.device_uuid.uuid == device_r2_uuid + assert isinstance(events[5], DeviceEvent) + assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[5].device_id.device_uuid.uuid == device_r3_uuid + assert isinstance(events[6], TopologyEvent) + assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[6].topology_id.context_id.context_uuid.uuid == context_uuid + assert events[6].topology_id.topology_uuid.uuid == topology_uuid + assert isinstance(events[7], ContextEvent) + assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + assert events[7].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + events_collector.stop() diff --git a/src/context/tests/test_context.py b/src/context/tests/test_context.py index 4337db239..77f1dc380 100644 --- a/src/context/tests/test_context.py +++ b/src/context/tests/test_context.py @@ -19,6 +19,8 @@ from context.service.database.uuids.Context import context_get_uuid from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME +GET_EVENTS_TIMEOUT = 10.0 + def test_context(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- @@ -51,7 +53,7 @@ def test_context(context_client : ContextClient) -> None: assert response.context_uuid.uuid == context_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.context_id.context_uuid.uuid == context_uuid @@ -85,7 +87,7 @@ def test_context(context_client : ContextClient) -> None: assert response.context_uuid.uuid == context_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.context_id.context_uuid.uuid == context_uuid @@ -115,7 +117,7 @@ def test_context(context_client : ContextClient) -> None: context_client.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert event.context_id.context_uuid.uuid == context_uuid diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py index 6e2fdd52d..bcbe4cc3b 100644 --- a/src/context/tests/test_device.py +++ b/src/context/tests/test_device.py @@ -21,6 +21,8 @@ from context.service.database.uuids.Device import device_get_uuid from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_NAME, TOPOLOGY, TOPOLOGY_ID +GET_EVENTS_TIMEOUT = 10.0 + @pytest.mark.depends(on=['context/tests/test_topology.py::test_topology']) def test_device(context_client : ContextClient) -> None: @@ -41,7 +43,7 @@ def test_device(context_client : ContextClient) -> None: assert response.context_id.context_uuid.uuid == context_uuid topology_uuid = response.topology_uuid.uuid - events = events_collector.get_events(block=True, count=2, timeout=1.0) + events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT) assert isinstance(events[0], ContextEvent) assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert events[0].context_id.context_uuid.uuid == context_uuid @@ -81,7 +83,7 @@ def test_device(context_client : ContextClient) -> None: assert response.device_uuid.uuid == device_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, DeviceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.device_id.device_uuid.uuid == device_uuid @@ -124,7 +126,7 @@ def test_device(context_client : ContextClient) -> None: assert response.device_uuid.uuid == device_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, DeviceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.device_id.device_uuid.uuid == device_uuid @@ -170,7 +172,7 @@ def test_device(context_client : ContextClient) -> None: context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, DeviceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert event.device_id.device_uuid.uuid == device_uuid @@ -192,7 +194,7 @@ def test_device(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=2, timeout=1.0) + events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT) assert isinstance(events[0], TopologyEvent) assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert events[0].topology_id.context_id.context_uuid.uuid == context_uuid diff --git a/src/context/tests/test_link.py b/src/context/tests/test_link.py index 59fed4870..c8ed1d486 100644 --- a/src/context/tests/test_link.py +++ b/src/context/tests/test_link.py @@ -23,6 +23,8 @@ from .Objects import ( CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_NAME, TOPOLOGY, TOPOLOGY_ID) +GET_EVENTS_TIMEOUT = 10.0 + @pytest.mark.depends(on=['context/tests/test_device.py::test_device']) def test_link(context_client : ContextClient) -> None: @@ -49,7 +51,7 @@ def test_link(context_client : ContextClient) -> None: response = context_client.SetDevice(Device(**DEVICE_R2)) device_r2_uuid = response.device_uuid.uuid - events = events_collector.get_events(block=True, count=4, timeout=1.0) + events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) assert isinstance(events[0], ContextEvent) assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert events[0].context_id.context_uuid.uuid == context_uuid @@ -85,7 +87,7 @@ def test_link(context_client : ContextClient) -> None: assert response.link_uuid.uuid == link_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, LinkEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.link_id.link_uuid.uuid == link_uuid @@ -115,7 +117,7 @@ def test_link(context_client : ContextClient) -> None: assert response.link_uuid.uuid == link_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, LinkEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.link_id.link_uuid.uuid == link_uuid @@ -151,7 +153,7 @@ def test_link(context_client : ContextClient) -> None: context_client.RemoveLink(LinkId(**LINK_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, LinkEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert event.link_id.link_uuid.uuid == link_uuid @@ -177,7 +179,7 @@ def test_link(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=4, timeout=1.0) + events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) assert isinstance(events[0], DeviceEvent) assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert events[0].device_id.device_uuid.uuid == device_r1_uuid diff --git a/src/context/tests/test_service.py b/src/context/tests/test_service.py index e80437dbb..4e46c24ad 100644 --- a/src/context/tests/test_service.py +++ b/src/context/tests/test_service.py @@ -23,6 +23,8 @@ from .Objects import ( CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, SERVICE_R1_R2_NAME, DEVICE_R2, DEVICE_R2_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, TOPOLOGY, TOPOLOGY_ID) +GET_EVENTS_TIMEOUT = 10.0 + @pytest.mark.depends(on=['context/tests/test_link.py::test_link']) def test_service(context_client : ContextClient) -> None: @@ -49,7 +51,7 @@ def test_service(context_client : ContextClient) -> None: response = context_client.SetDevice(Device(**DEVICE_R2)) device_r2_uuid = response.device_uuid.uuid - events = events_collector.get_events(block=True, count=4, timeout=1.0) + events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) assert isinstance(events[0], ContextEvent) assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert events[0].context_id.context_uuid.uuid == context_uuid @@ -102,7 +104,7 @@ def test_service(context_client : ContextClient) -> None: assert response.service_uuid.uuid == service_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ServiceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.service_id.context_id.context_uuid.uuid == context_uuid @@ -155,7 +157,7 @@ def test_service(context_client : ContextClient) -> None: assert response.service_uuid.uuid == service_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ServiceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.service_id.context_id.context_uuid.uuid == context_uuid @@ -193,7 +195,7 @@ def test_service(context_client : ContextClient) -> None: context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ServiceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert event.service_id.context_id.context_uuid.uuid == context_uuid @@ -217,7 +219,7 @@ def test_service(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=4, timeout=1.0) + events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) assert isinstance(events[0], DeviceEvent) assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert events[0].device_id.device_uuid.uuid == device_r1_uuid diff --git a/src/context/tests/test_slice.py b/src/context/tests/test_slice.py index cb7eb7737..6996bb39e 100644 --- a/src/context/tests/test_slice.py +++ b/src/context/tests/test_slice.py @@ -14,8 +14,8 @@ import copy, grpc, pytest, time from common.proto.context_pb2 import ( - Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, Slice, SliceEvent, SliceId, SliceStatusEnum, Topology, TopologyEvent, - TopologyId) + Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Link, LinkEvent, LinkId, Service, + ServiceEvent, ServiceId, Slice, SliceEvent, SliceId, SliceStatusEnum, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient from context.service.database.uuids.Slice import slice_get_uuid from context.client.EventsCollector import EventsCollector @@ -24,6 +24,8 @@ from .Objects import ( LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SLICE_R1_R3, SLICE_R1_R3_ID, SLICE_R1_R3_NAME, TOPOLOGY, TOPOLOGY_ID) +GET_EVENTS_TIMEOUT = 10.0 + @pytest.mark.depends(on=['context/tests/test_service.py::test_service']) def test_slice(context_client : ContextClient) -> None: @@ -70,7 +72,7 @@ def test_slice(context_client : ContextClient) -> None: assert response.context_id.context_uuid.uuid == context_uuid service_r2_r3_uuid = response.service_uuid.uuid - events = events_collector.get_events(block=True, count=10, timeout=1.0) + events = events_collector.get_events(block=True, count=10, timeout=GET_EVENTS_TIMEOUT) assert isinstance(events[0], ContextEvent) assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert events[0].context_id.context_uuid.uuid == context_uuid @@ -143,7 +145,7 @@ def test_slice(context_client : ContextClient) -> None: assert response.slice_uuid.uuid == slice_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, SliceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.slice_id.context_id.context_uuid.uuid == context_uuid @@ -194,7 +196,7 @@ def test_slice(context_client : ContextClient) -> None: assert response.slice_uuid.uuid == slice_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, SliceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.slice_id.context_id.context_uuid.uuid == context_uuid @@ -230,7 +232,7 @@ def test_slice(context_client : ContextClient) -> None: context_client.RemoveSlice(SliceId(**SLICE_R1_R3_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, SliceEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert event.slice_id.context_id.context_uuid.uuid == context_uuid diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 311e0f874..6a3367d49 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -20,6 +20,8 @@ from context.service.database.uuids.Topology import topology_get_uuid from context.client.EventsCollector import EventsCollector from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, TOPOLOGY_NAME +GET_EVENTS_TIMEOUT = 10.0 + @pytest.mark.depends(on=['context/tests/test_context.py::test_context']) def test_topology(context_client : ContextClient) -> None: @@ -36,7 +38,7 @@ def test_topology(context_client : ContextClient) -> None: response = context_client.SetContext(Context(**CONTEXT)) context_uuid = response.context_uuid.uuid - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.context_id.context_uuid.uuid == context_uuid @@ -68,7 +70,7 @@ def test_topology(context_client : ContextClient) -> None: assert response.topology_uuid.uuid == topology_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, TopologyEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.topology_id.context_id.context_uuid.uuid == context_uuid @@ -114,7 +116,7 @@ def test_topology(context_client : ContextClient) -> None: assert response.topology_uuid.uuid == topology_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, TopologyEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE assert event.topology_id.context_id.context_uuid.uuid == context_uuid @@ -146,7 +148,7 @@ def test_topology(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, TopologyEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert event.topology_id.context_id.context_uuid.uuid == context_uuid @@ -167,7 +169,7 @@ def test_topology(context_client : ContextClient) -> None: # ----- Clean dependencies used in the test and capture related events --------------------------------------------- context_client.RemoveContext(ContextId(**CONTEXT_ID)) - event = events_collector.get_event(block=True, timeout=1.0) + event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) assert isinstance(event, ContextEvent) assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE assert event.context_id.context_uuid.uuid == context_uuid -- GitLab From 9593ba920accc113543be3b4dd16eede9deb91f6 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 16 Jan 2023 10:48:23 +0000 Subject: [PATCH 057/158] Context: - testing CI/CD pipeline --- src/context/.gitlab-ci.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 61c59cb44..6dfd0248b 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -60,9 +60,7 @@ unit test context: - docker volume create crdb - > docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 - --env COCKROACH_DATABASE=tfs_test - --env COCKROACH_USER=tfs - --env COCKROACH_PASSWORD=tfs123 + --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 --volume "crdb:/cockroach/cockroach-data" cockroachdb/cockroach:latest-v22.2 start-single-node - > -- GitLab From 362b28a9d147a4cb960efea553dfb8ba9d06b633 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 16 Jan 2023 12:03:32 +0000 Subject: [PATCH 058/158] Context: - corrected unitary test order and one-by-one execution - extractedunitary test constant to separate file - added updated_at refresh for Service and Slice entities - corrected return types for Connection entity - prepared PolicyRule entity to raise events and corrected return types of methods --- .../service/ContextServiceServicerImpl.py | 21 ++++++----- src/context/service/Events.py | 5 ++- src/context/service/database/Connection.py | 4 +-- src/context/service/database/PolicyRule.py | 35 ++++++++++++------- src/context/service/database/Service.py | 1 + src/context/service/database/Slice.py | 1 + .../database/models/PolicyRuleModel.py | 7 ++-- src/context/tests/Constants.py | 15 ++++++++ src/context/tests/test_connection.py | 3 +- src/context/tests/test_context.py | 3 +- src/context/tests/test_device.py | 3 +- src/context/tests/test_link.py | 3 +- src/context/tests/test_policy.py | 2 +- src/context/tests/test_service.py | 3 +- src/context/tests/test_slice.py | 3 +- src/context/tests/test_topology.py | 3 +- 16 files changed, 68 insertions(+), 44 deletions(-) create mode 100644 src/context/tests/Constants.py diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index f5b2662b3..82e28a7f1 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -39,8 +39,8 @@ from .database.Service import service_delete, service_get, service_list_ids, ser from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset from .database.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set from .Events import ( - CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, #TOPIC_POLICY, - TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY, notify_event) + CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, + TOPIC_SLICE, TOPIC_TOPOLOGY, notify_event) LOGGER = logging.getLogger(__name__) @@ -313,22 +313,27 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: - return policyrule_list_ids(self.db_engine) + return PolicyRuleIdList(policyRuleIdList=policyrule_list_ids(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList: - return policyrule_list_objs(self.db_engine) + return PolicyRuleList(policyRules=policyrule_list_objs(self.db_engine)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: - return policyrule_get(self.db_engine, request) + return PolicyRule(**policyrule_get(self.db_engine, request)) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: - policyrule_id,updated = policyrule_set(self.db_engine, request) # pylint: disable=unused-variable - return policyrule_id + policyrule_id,updated = policyrule_set(self.db_engine, request) + event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + notify_event(self.messagebroker, TOPIC_POLICY, event_type, {'policyrule_id': policyrule_id}) + return PolicyRuleId(**policyrule_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty: - deleted = policyrule_delete(self.db_engine, request) # pylint: disable=unused-variable + policyrule_id,deleted = policyrule_delete(self.db_engine, request) + if deleted: + event_type = EventTypeEnum.EVENTTYPE_REMOVE + notify_event(self.messagebroker, TOPIC_POLICY, event_type, {'policyrule_id': policyrule_id}) return Empty() diff --git a/src/context/service/Events.py b/src/context/service/Events.py index e7cf1997c..77401314b 100644 --- a/src/context/service/Events.py +++ b/src/context/service/Events.py @@ -22,14 +22,13 @@ TOPIC_CONNECTION = 'connection' TOPIC_CONTEXT = 'context' TOPIC_DEVICE = 'device' TOPIC_LINK = 'link' -#TOPIC_POLICY = 'policy' +TOPIC_POLICY = 'policy' TOPIC_SERVICE = 'service' TOPIC_SLICE = 'slice' TOPIC_TOPOLOGY = 'topology' TOPICS = { - TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, #TOPIC_POLICY, - TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY + TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY } CONSUME_TIMEOUT = 0.5 # seconds diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py index f1616e96e..6d6d941cb 100644 --- a/src/context/service/database/Connection.py +++ b/src/context/service/database/Connection.py @@ -136,7 +136,7 @@ def connection_set(db_engine : Engine, request : Connection) -> Tuple[Dict, bool return updated updated = run_transaction(sessionmaker(bind=db_engine), callback) - return ConnectionId(**json_connection_id(connection_uuid)),updated + return json_connection_id(connection_uuid),updated def connection_delete(db_engine : Engine, request : ConnectionId) -> Tuple[Dict, bool]: connection_uuid = connection_get_uuid(request, allow_random=False) @@ -144,4 +144,4 @@ def connection_delete(db_engine : Engine, request : ConnectionId) -> Tuple[Dict, num_deleted = session.query(ConnectionModel).filter_by(connection_uuid=connection_uuid).delete() return num_deleted > 0 deleted = run_transaction(sessionmaker(bind=db_engine), callback) - return ConnectionId(**json_connection_id(connection_uuid)),deleted + return json_connection_id(connection_uuid),deleted diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py index 2371af88e..70a37c7d8 100644 --- a/src/context/service/database/PolicyRule.py +++ b/src/context/service/database/PolicyRule.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json +import datetime, json from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker @@ -28,19 +28,19 @@ from .models.PolicyRuleModel import PolicyRuleDeviceModel, PolicyRuleKindEnum, P from .uuids.PolicuRule import policyrule_get_uuid from .uuids.Service import service_get_uuid -def policyrule_list_ids(db_engine : Engine) -> PolicyRuleIdList: +def policyrule_list_ids(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all() #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump_id() for obj in obj_list] - return PolicyRuleIdList(policyRuleIdList=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) -def policyrule_list_objs(db_engine : Engine) -> PolicyRuleList: +def policyrule_list_objs(db_engine : Engine) -> List[Dict]: def callback(session : Session) -> List[Dict]: obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all() #.options(selectinload(PolicyRuleModel.topology)).filter_by(context_uuid=context_uuid).one_or_none() return [obj.dump() for obj in obj_list] - return PolicyRuleList(policyRules=run_transaction(sessionmaker(bind=db_engine), callback)) + return run_transaction(sessionmaker(bind=db_engine), callback) def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule: policyrule_uuid = policyrule_get_uuid(request, allow_random=False) @@ -54,7 +54,7 @@ def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule: raise NotFoundException('PolicyRule', raw_policyrule_uuid, extra_details=[ 'policyrule_uuid generated was: {:s}'.format(policyrule_uuid) ]) - return PolicyRule(**obj) + return obj def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRuleId, bool]: policyrule_kind = request.WhichOneof('policy_rule') @@ -74,6 +74,8 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule 'actionList': json_policyrule_basic.get('actionList', []), }, sort_keys=True) + now = datetime.datetime.utcnow() + policyrule_data = [{ 'policyrule_uuid' : policyrule_uuid, 'policyrule_kind' : policyrule_kind, @@ -81,6 +83,8 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule 'policyrule_state_message': policyrule_state_message, 'policyrule_priority' : policyrule_basic.priority, 'policyrule_eca_data' : policyrule_eca_data, + 'created_at' : now, + 'updated_at' : now, }] policyrule_service_uuid = None @@ -99,7 +103,7 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule }) device_uuids.add(device_uuid) - def callback(session : Session) -> None: + def callback(session : Session) -> bool: stmt = insert(PolicyRuleModel).values(policyrule_data) stmt = stmt.on_conflict_do_update( index_elements=[PolicyRuleModel.policyrule_uuid], @@ -108,22 +112,27 @@ def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRule policyrule_state_message = stmt.excluded.policyrule_state_message, policyrule_priority = stmt.excluded.policyrule_priority, policyrule_eca_data = stmt.excluded.policyrule_eca_data, + updated_at = stmt.excluded.updated_at, ) ) - session.execute(stmt) + stmt = stmt.returning(PolicyRuleModel.created_at, PolicyRuleModel.updated_at) + created_at,updated_at = session.execute(stmt).fetchone() + updated = updated_at > created_at if len(related_devices) > 0: session.execute(insert(PolicyRuleDeviceModel).values(related_devices).on_conflict_do_nothing( index_elements=[PolicyRuleDeviceModel.policyrule_uuid, PolicyRuleDeviceModel.device_uuid] )) - run_transaction(sessionmaker(bind=db_engine), callback) - updated = False # TODO: improve and check if created/updated - return PolicyRuleId(**json_policyrule_id(policyrule_uuid)),updated + return updated + + updated = run_transaction(sessionmaker(bind=db_engine), callback) + return json_policyrule_id(policyrule_uuid),updated -def policyrule_delete(db_engine : Engine, request : PolicyRuleId) -> bool: +def policyrule_delete(db_engine : Engine, request : PolicyRuleId) -> Tuple[Dict, bool]: policyrule_uuid = policyrule_get_uuid(request, allow_random=False) def callback(session : Session) -> bool: num_deleted = session.query(PolicyRuleModel).filter_by(policyrule_uuid=policyrule_uuid).delete() return num_deleted > 0 - return run_transaction(sessionmaker(bind=db_engine), callback) + deleted = run_transaction(sessionmaker(bind=db_engine), callback) + return json_policyrule_id(policyrule_uuid),deleted diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index a8f9f40d6..b65010fed 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -111,6 +111,7 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]: service_name = stmt.excluded.service_name, service_type = stmt.excluded.service_type, service_status = stmt.excluded.service_status, + updated_at = stmt.excluded.updated_at, ) ) stmt = stmt.returning(ServiceModel.created_at, ServiceModel.updated_at) diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index f255968b2..b0b83238c 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -127,6 +127,7 @@ def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: set_=dict( slice_name = stmt.excluded.slice_name, slice_status = stmt.excluded.slice_status, + updated_at = stmt.excluded.updated_at, slice_owner_uuid = stmt.excluded.slice_owner_uuid, slice_owner_string = stmt.excluded.slice_owner_string, ) diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py index 8fc111087..4ccec8dd8 100644 --- a/src/context/service/database/models/PolicyRuleModel.py +++ b/src/context/service/database/models/PolicyRuleModel.py @@ -13,12 +13,11 @@ # limitations under the License. import enum, json -from sqlalchemy import CheckConstraint, Column, Enum, ForeignKey, Integer, String +from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict - -from context.service.database.models.enums.PolicyRuleState import ORM_PolicyRuleStateEnum +from .enums.PolicyRuleState import ORM_PolicyRuleStateEnum from ._Base import _Base # Enum values should match name of field in PolicyRule message @@ -36,6 +35,8 @@ class PolicyRuleModel(_Base): policyrule_priority = Column(Integer, nullable=False) policyrule_service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True) policyrule_eca_data = Column(String, nullable=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) policyrule_service = relationship('ServiceModel') # back_populates='policyrules' policyrule_devices = relationship('PolicyRuleDeviceModel' ) # back_populates='policyrule' diff --git a/src/context/tests/Constants.py b/src/context/tests/Constants.py new file mode 100644 index 000000000..b29584a7b --- /dev/null +++ b/src/context/tests/Constants.py @@ -0,0 +1,15 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +GET_EVENTS_TIMEOUT = 60.0 diff --git a/src/context/tests/test_connection.py b/src/context/tests/test_connection.py index 909ddb6ef..86abad7ed 100644 --- a/src/context/tests/test_connection.py +++ b/src/context/tests/test_connection.py @@ -19,13 +19,12 @@ from context.client.ContextClient import ContextClient from context.service.database.uuids.Connection import connection_get_uuid from context.service.database.uuids.EndPoint import endpoint_get_uuid from context.client.EventsCollector import EventsCollector +from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_NAME, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R3, SERVICE_R1_R3_ID, SERVICE_R2_R3, SERVICE_R2_R3_ID, TOPOLOGY, TOPOLOGY_ID) -GET_EVENTS_TIMEOUT = 10.0 - @pytest.mark.depends(on=['context/tests/test_service.py::test_service', 'context/tests/test_slice.py::test_slice']) def test_connection(context_client : ContextClient) -> None: diff --git a/src/context/tests/test_context.py b/src/context/tests/test_context.py index 77f1dc380..7a9564df6 100644 --- a/src/context/tests/test_context.py +++ b/src/context/tests/test_context.py @@ -17,10 +17,9 @@ from common.proto.context_pb2 import Context, ContextEvent, ContextId, Empty, Ev from context.client.ContextClient import ContextClient from context.service.database.uuids.Context import context_get_uuid from context.client.EventsCollector import EventsCollector +from .Constants import GET_EVENTS_TIMEOUT from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME -GET_EVENTS_TIMEOUT = 10.0 - def test_context(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py index bcbe4cc3b..615ebe0be 100644 --- a/src/context/tests/test_device.py +++ b/src/context/tests/test_device.py @@ -19,10 +19,9 @@ from common.proto.context_pb2 import ( from context.client.ContextClient import ContextClient from context.service.database.uuids.Device import device_get_uuid from context.client.EventsCollector import EventsCollector +from .Constants import GET_EVENTS_TIMEOUT from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_NAME, TOPOLOGY, TOPOLOGY_ID -GET_EVENTS_TIMEOUT = 10.0 - @pytest.mark.depends(on=['context/tests/test_topology.py::test_topology']) def test_device(context_client : ContextClient) -> None: diff --git a/src/context/tests/test_link.py b/src/context/tests/test_link.py index c8ed1d486..e56a1889d 100644 --- a/src/context/tests/test_link.py +++ b/src/context/tests/test_link.py @@ -19,12 +19,11 @@ from common.proto.context_pb2 import ( from context.client.ContextClient import ContextClient from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Link import link_get_uuid +from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_NAME, TOPOLOGY, TOPOLOGY_ID) -GET_EVENTS_TIMEOUT = 10.0 - @pytest.mark.depends(on=['context/tests/test_device.py::test_device']) def test_link(context_client : ContextClient) -> None: diff --git a/src/context/tests/test_policy.py b/src/context/tests/test_policy.py index f9bf5ef6d..1cc0b9557 100644 --- a/src/context/tests/test_policy.py +++ b/src/context/tests/test_policy.py @@ -19,7 +19,7 @@ from context.client.ContextClient import ContextClient from context.service.database.uuids.PolicuRule import policyrule_get_uuid from .Objects import POLICYRULE, POLICYRULE_ID, POLICYRULE_NAME -@pytest.mark.depends(on=['context/tests/test_device.py::test_device', 'context/tests/test_service.py::test_service']) +@pytest.mark.depends(on=['context/tests/test_connection.py::test_connection']) def test_policy(context_client : ContextClient): # ----- Get when the object does not exist ------------------------------------------------------------------------- diff --git a/src/context/tests/test_service.py b/src/context/tests/test_service.py index 4e46c24ad..ca02a4a91 100644 --- a/src/context/tests/test_service.py +++ b/src/context/tests/test_service.py @@ -19,12 +19,11 @@ from common.proto.context_pb2 import ( from context.client.ContextClient import ContextClient from context.service.database.uuids.Service import service_get_uuid from context.client.EventsCollector import EventsCollector +from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, SERVICE_R1_R2_NAME, DEVICE_R2, DEVICE_R2_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, TOPOLOGY, TOPOLOGY_ID) -GET_EVENTS_TIMEOUT = 10.0 - @pytest.mark.depends(on=['context/tests/test_link.py::test_link']) def test_service(context_client : ContextClient) -> None: diff --git a/src/context/tests/test_slice.py b/src/context/tests/test_slice.py index 6996bb39e..1008e7e91 100644 --- a/src/context/tests/test_slice.py +++ b/src/context/tests/test_slice.py @@ -19,13 +19,12 @@ from common.proto.context_pb2 import ( from context.client.ContextClient import ContextClient from context.service.database.uuids.Slice import slice_get_uuid from context.client.EventsCollector import EventsCollector +from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SLICE_R1_R3, SLICE_R1_R3_ID, SLICE_R1_R3_NAME, TOPOLOGY, TOPOLOGY_ID) -GET_EVENTS_TIMEOUT = 10.0 - @pytest.mark.depends(on=['context/tests/test_service.py::test_service']) def test_slice(context_client : ContextClient) -> None: diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 6a3367d49..0d8b8c027 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -18,10 +18,9 @@ from common.proto.context_pb2 import ( from context.client.ContextClient import ContextClient from context.service.database.uuids.Topology import topology_get_uuid from context.client.EventsCollector import EventsCollector +from .Constants import GET_EVENTS_TIMEOUT from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, TOPOLOGY_NAME -GET_EVENTS_TIMEOUT = 10.0 - @pytest.mark.depends(on=['context/tests/test_context.py::test_context']) def test_topology(context_client : ContextClient) -> None: -- GitLab From b4c8e2e0d28c4104ab6b9a4c6d0870b9bca86cdc Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 16 Jan 2023 12:36:25 +0000 Subject: [PATCH 059/158] Context: - added clean-up commands for the GitLab runner --- src/context/.gitlab-ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 6dfd0248b..fa6dabb4b 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -95,6 +95,8 @@ unit test context: - docker rm -f $IMAGE_NAME crdb nats - docker volume rm -f crdb - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force rules: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' -- GitLab From c0c42c48420582df404def1a7c7935ea64adc4e1 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 16 Jan 2023 12:59:42 +0000 Subject: [PATCH 060/158] Context: - added smart wait for crdb and nats to start --- src/context/.gitlab-ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index fa6dabb4b..fa9e37950 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -67,10 +67,11 @@ unit test context: docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 nats:2.9 --http_port 8222 --user tfs --pass tfs123 - echo "Waiting for initialization..." - - sleep 15 - - docker ps -a + - docker logs -f crdb 2>&1 | grep -m 1 'finished creating default database "tfs_test"' - docker logs crdb + - docker logs -f nats 2>&1 | grep -m 1 'Server is ready' - docker logs nats + - docker ps -a - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") - echo $CRDB_ADDRESS - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") -- GitLab From 823eb467cd4bbcff968783cf284b7b9a24a9e07f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 16 Jan 2023 17:34:12 +0000 Subject: [PATCH 061/158] Context component: - disabled event testing; operation works, but sometimes tests get stuck - added dynamic wait for CockroachDB and NATS containers to start - tuned grace shutdown period - improved NatsBackend termination --- scripts/run_tests_locally-context.sh | 11 +- src/common/Constants.py | 4 +- .../backend/nats/NatsBackend.py | 4 +- .../backend/nats/NatsBackendThread.py | 20 +- src/context/.gitlab-ci.yml | 4 +- src/context/tests/test_connection.py | 179 ++++++++-------- src/context/tests/test_context.py | 49 ++--- src/context/tests/test_device.py | 82 ++++---- src/context/tests/test_link.py | 107 +++++----- src/context/tests/test_service.py | 113 ++++++----- src/context/tests/test_slice.py | 192 +++++++++--------- src/context/tests/test_topology.py | 66 +++--- 12 files changed, 431 insertions(+), 400 deletions(-) diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh index 8c0b300b7..9d29ac587 100755 --- a/scripts/run_tests_locally-context.sh +++ b/scripts/run_tests_locally-context.sh @@ -47,8 +47,17 @@ docker run --name crdb -d --network=tfs-br --ip 172.254.254.10 -p 26257:26257 -p cockroachdb/cockroach:latest-v22.2 start-single-node docker run --name nats -d --network=tfs-br --ip 172.254.254.11 -p 4222:4222 -p 8222:8222 \ nats:2.9 --http_port 8222 --user tfs --pass tfs123 + +echo echo "Waiting for initialization..." -sleep 10 +echo "-----------------------------" +#docker logs -f crdb 2>&1 | grep --max-count=1 'finished creating default user "tfs"' +while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done +docker logs crdb +#docker logs -f nats 2>&1 | grep --max-count=1 'Server is ready' +while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done +docker logs nats +#sleep 10 docker ps -a echo diff --git a/src/common/Constants.py b/src/common/Constants.py index 055267191..bdbde21b2 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, uuid +import logging #, uuid from enum import Enum # Default logging level @@ -21,7 +21,7 @@ DEFAULT_LOG_LEVEL = logging.WARNING # Default gRPC server settings DEFAULT_GRPC_BIND_ADDRESS = '0.0.0.0' DEFAULT_GRPC_MAX_WORKERS = 200 -DEFAULT_GRPC_GRACE_PERIOD = 60 +DEFAULT_GRPC_GRACE_PERIOD = 10 # Default HTTP server settings DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0' diff --git a/src/common/message_broker/backend/nats/NatsBackend.py b/src/common/message_broker/backend/nats/NatsBackend.py index 0825095eb..197bc8633 100644 --- a/src/common/message_broker/backend/nats/NatsBackend.py +++ b/src/common/message_broker/backend/nats/NatsBackend.py @@ -39,11 +39,13 @@ class NatsBackend(_Backend): def consume(self, topic_names : Set[str], consume_timeout : float) -> Iterator[Tuple[str, str]]: out_queue = queue.Queue[Message]() unsubscribe = threading.Event() + tasks = [] for topic_name in topic_names: - self._nats_backend_thread.subscribe(topic_name, consume_timeout, out_queue, unsubscribe) + tasks.append(self._nats_backend_thread.subscribe(topic_name, consume_timeout, out_queue, unsubscribe)) while not self._terminate.is_set(): try: yield out_queue.get(block=True, timeout=consume_timeout) except queue.Empty: continue unsubscribe.set() + for task in tasks: task.cancel() diff --git a/src/common/message_broker/backend/nats/NatsBackendThread.py b/src/common/message_broker/backend/nats/NatsBackendThread.py index e11ab7c04..801cc361e 100644 --- a/src/common/message_broker/backend/nats/NatsBackendThread.py +++ b/src/common/message_broker/backend/nats/NatsBackendThread.py @@ -13,6 +13,7 @@ # limitations under the License. import asyncio, nats, nats.errors, queue, threading +from typing import List from common.message_broker.Message import Message class NatsBackendThread(threading.Thread): @@ -20,16 +21,23 @@ class NatsBackendThread(threading.Thread): self._nats_uri = nats_uri self._event_loop = asyncio.get_event_loop() self._terminate = asyncio.Event() + self._tasks_terminated = asyncio.Event() self._publish_queue = asyncio.Queue[Message]() + self._tasks : List[asyncio.Task] = list() super().__init__() def terminate(self) -> None: self._terminate.set() + for task in self._tasks: task.cancel() + self._tasks_terminated.set() async def _run_publisher(self) -> None: client = await nats.connect(servers=[self._nats_uri]) while not self._terminate.is_set(): - message : Message = await self._publish_queue.get() + try: + message : Message = await self._publish_queue.get() + except asyncio.CancelledError: + break await client.publish(message.topic, message.content.encode('UTF-8')) await client.drain() @@ -46,6 +54,8 @@ class NatsBackendThread(threading.Thread): message = await subscription.next_msg(timeout) except nats.errors.TimeoutError: continue + except asyncio.CancelledError: + break out_queue.put(Message(message.subject, message.data.decode('UTF-8'))) await subscription.unsubscribe() await client.drain() @@ -53,9 +63,13 @@ class NatsBackendThread(threading.Thread): def subscribe( self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event ) -> None: - self._event_loop.create_task(self._run_subscriber(topic_name, timeout, out_queue, unsubscribe)) + task = self._event_loop.create_task(self._run_subscriber(topic_name, timeout, out_queue, unsubscribe)) + self._tasks.append(task) def run(self) -> None: asyncio.set_event_loop(self._event_loop) - self._event_loop.create_task(self._run_publisher()) + task = self._event_loop.create_task(self._run_publisher()) + self._tasks.append(task) self._event_loop.run_until_complete(self._terminate.wait()) + self._tasks.remove(task) + self._event_loop.run_until_complete(self._tasks_terminated.wait()) diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index fa9e37950..29b5fb9db 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -67,9 +67,9 @@ unit test context: docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 nats:2.9 --http_port 8222 --user tfs --pass tfs123 - echo "Waiting for initialization..." - - docker logs -f crdb 2>&1 | grep -m 1 'finished creating default database "tfs_test"' + - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done - docker logs crdb - - docker logs -f nats 2>&1 | grep -m 1 'Server is ready' + - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done - docker logs nats - docker ps -a - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") diff --git a/src/context/tests/test_connection.py b/src/context/tests/test_connection.py index 86abad7ed..f4b9e4824 100644 --- a/src/context/tests/test_connection.py +++ b/src/context/tests/test_connection.py @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest, time +import copy, grpc, pytest #, time from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPointId, EventTypeEnum, Service, ServiceEvent, ServiceId, Topology, TopologyEvent, TopologyId) + Connection, ConnectionId, Context, ContextId, Device, DeviceId, EndPointId, Service, ServiceId, Topology, + TopologyId) +#from common.proto.context_pb2 import ( +# ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, ServiceEvent, TopologyEvent) from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Connection import connection_get_uuid from context.service.database.uuids.EndPoint import endpoint_get_uuid -from context.client.EventsCollector import EventsCollector -from .Constants import GET_EVENTS_TIMEOUT +#from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_NAME, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R3, SERVICE_R1_R3_ID, @@ -29,13 +32,13 @@ from .Objects import ( def test_connection(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client, log_events_received=True, - activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, - activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True, - activate_connection_collector = True) - events_collector.start() - time.sleep(3) + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + # activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True, + # activate_connection_collector = True) + #events_collector.start() + #time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -43,58 +46,58 @@ def test_connection(context_client : ContextClient) -> None: response = context_client.SetTopology(Topology(**TOPOLOGY)) assert response.context_id.context_uuid.uuid == context_uuid - topology_uuid = response.topology_uuid.uuid + #topology_uuid = response.topology_uuid.uuid response = context_client.SetDevice(Device(**DEVICE_R1)) - device_r1_uuid = response.device_uuid.uuid + #device_r1_uuid = response.device_uuid.uuid response = context_client.SetDevice(Device(**DEVICE_R2)) - device_r2_uuid = response.device_uuid.uuid + #device_r2_uuid = response.device_uuid.uuid response = context_client.SetDevice(Device(**DEVICE_R3)) - device_r3_uuid = response.device_uuid.uuid + #device_r3_uuid = response.device_uuid.uuid response = context_client.SetService(Service(**SERVICE_R1_R2)) assert response.context_id.context_uuid.uuid == context_uuid - service_r1_r2_uuid = response.service_uuid.uuid + #service_r1_r2_uuid = response.service_uuid.uuid response = context_client.SetService(Service(**SERVICE_R2_R3)) assert response.context_id.context_uuid.uuid == context_uuid - service_r2_r3_uuid = response.service_uuid.uuid + #service_r2_r3_uuid = response.service_uuid.uuid response = context_client.SetService(Service(**SERVICE_R1_R3)) assert response.context_id.context_uuid.uuid == context_uuid service_r1_r3_uuid = response.service_uuid.uuid - events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == context_uuid - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[1].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == device_r2_uuid - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[4].device_id.device_uuid.uuid == device_r3_uuid - assert isinstance(events[5], ServiceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[5].service_id.context_id.context_uuid.uuid == context_uuid - assert events[5].service_id.service_uuid.uuid == service_r1_r2_uuid - assert isinstance(events[6], ServiceEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[6].service_id.context_id.context_uuid.uuid == context_uuid - assert events[6].service_id.service_uuid.uuid == service_r2_r3_uuid - assert isinstance(events[7], ServiceEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[7].service_id.context_id.context_uuid.uuid == context_uuid - assert events[7].service_id.service_uuid.uuid == service_r1_r3_uuid + #events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[2], DeviceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[2].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[3].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[4], DeviceEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[4].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[5], ServiceEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[5].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[5].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[6], ServiceEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[6].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[6].service_id.service_uuid.uuid == service_r2_r3_uuid + #assert isinstance(events[7], ServiceEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[7].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[7].service_id.service_uuid.uuid == service_r1_r3_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- connection_id = ConnectionId(**CONNECTION_R1_R3_ID) @@ -130,10 +133,10 @@ def test_connection(context_client : ContextClient) -> None: connection_r1_r3_uuid = response.connection_uuid.uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ConnectionEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) @@ -160,10 +163,10 @@ def test_connection(context_client : ContextClient) -> None: assert response.connection_uuid.uuid == connection_r1_r3_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ConnectionEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID)) @@ -188,10 +191,10 @@ def test_connection(context_client : ContextClient) -> None: context_client.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ConnectionEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ConnectionEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID)) @@ -210,35 +213,35 @@ def test_connection(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], ServiceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].service_id.context_id.context_uuid.uuid == context_uuid - assert events[0].service_id.service_uuid.uuid == service_r1_r3_uuid - assert isinstance(events[1], ServiceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].service_id.context_id.context_uuid.uuid == context_uuid - assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid - assert isinstance(events[2], ServiceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].service_id.context_id.context_uuid.uuid == context_uuid - assert events[2].service_id.service_uuid.uuid == service_r1_r2_uuid - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].device_id.device_uuid.uuid == device_r2_uuid - assert isinstance(events[5], DeviceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[5].device_id.device_uuid.uuid == device_r3_uuid - assert isinstance(events[6], TopologyEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[6].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[6].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[7], ContextEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[7].context_id.context_uuid.uuid == context_uuid + #events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], ServiceEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[0].service_id.service_uuid.uuid == service_r1_r3_uuid + #assert isinstance(events[1], ServiceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid + #assert isinstance(events[2], ServiceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[2].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[4], DeviceEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[4].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[5], DeviceEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[5].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[6], TopologyEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[6].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[6].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[7], ContextEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[7].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() diff --git a/src/context/tests/test_context.py b/src/context/tests/test_context.py index 7a9564df6..29d4442f9 100644 --- a/src/context/tests/test_context.py +++ b/src/context/tests/test_context.py @@ -12,24 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest, time -from common.proto.context_pb2 import Context, ContextEvent, ContextId, Empty, EventTypeEnum +import copy, grpc, pytest #, time +from common.proto.context_pb2 import Context, ContextId, Empty +#from common.proto.context_pb2 import ContextEvent, EventTypeEnum from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Context import context_get_uuid -from context.client.EventsCollector import EventsCollector -from .Constants import GET_EVENTS_TIMEOUT +#from .Constants import GET_EVENTS_TIMEOUT from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME def test_context(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client, log_events_received=True, - activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, - activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - activate_connection_collector = False) - events_collector.start() - time.sleep(3) # wait for the events collector to start + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + #time.sleep(3) # wait for the events collector to start # ----- Get when the object does not exist ------------------------------------------------------------------------- context_id = ContextId(**CONTEXT_ID) @@ -52,10 +53,10 @@ def test_context(context_client : ContextClient) -> None: assert response.context_uuid.uuid == context_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.context_id.context_uuid.uuid == context_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -86,10 +87,10 @@ def test_context(context_client : ContextClient) -> None: assert response.context_uuid.uuid == context_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.context_id.context_uuid.uuid == context_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -116,10 +117,10 @@ def test_context(context_client : ContextClient) -> None: context_client.RemoveContext(ContextId(**CONTEXT_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.context_id.context_uuid.uuid == context_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.context_id.context_uuid.uuid == context_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListContextIds(Empty()) @@ -129,4 +130,4 @@ def test_context(context_client : ContextClient) -> None: assert len(response.contexts) == 0 # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py index 615ebe0be..9afe64f57 100644 --- a/src/context/tests/test_device.py +++ b/src/context/tests/test_device.py @@ -12,27 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest, time +import copy, grpc, pytest #, time from common.proto.context_pb2 import ( - Context, ContextEvent, ContextId, Device, DeviceDriverEnum, DeviceEvent, DeviceId, DeviceOperationalStatusEnum, - Empty, EventTypeEnum, Topology, TopologyEvent, TopologyId) + Context, ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty, Topology, TopologyId) +#from common.proto.context_pb2 import ContextEvent, DeviceEvent, EventTypeEnum, TopologyEvent from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Device import device_get_uuid -from context.client.EventsCollector import EventsCollector -from .Constants import GET_EVENTS_TIMEOUT +#from .Constants import GET_EVENTS_TIMEOUT from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_NAME, TOPOLOGY, TOPOLOGY_ID @pytest.mark.depends(on=['context/tests/test_topology.py::test_topology']) def test_device(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client, log_events_received=True, - activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, - activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - activate_connection_collector = False) - events_collector.start() - time.sleep(3) + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + #time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -42,14 +42,14 @@ def test_device(context_client : ContextClient) -> None: assert response.context_id.context_uuid.uuid == context_uuid topology_uuid = response.topology_uuid.uuid - events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == context_uuid - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[1].topology_id.topology_uuid.uuid == topology_uuid + #events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- device_id = DeviceId(**DEVICE_R1_ID) @@ -82,10 +82,10 @@ def test_device(context_client : ContextClient) -> None: assert response.device_uuid.uuid == device_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, DeviceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.device_id.device_uuid.uuid == device_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, DeviceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.device_id.device_uuid.uuid == device_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID)) @@ -125,10 +125,10 @@ def test_device(context_client : ContextClient) -> None: assert response.device_uuid.uuid == device_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, DeviceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.device_id.device_uuid.uuid == device_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, DeviceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.device_id.device_uuid.uuid == device_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID)) @@ -171,10 +171,10 @@ def test_device(context_client : ContextClient) -> None: context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, DeviceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.device_id.device_uuid.uuid == device_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, DeviceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.device_id.device_uuid.uuid == device_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListDeviceIds(Empty()) @@ -193,14 +193,14 @@ def test_device(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], TopologyEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[0].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[1], ContextEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].context_id.context_uuid.uuid == context_uuid + #events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], TopologyEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[0].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[1], ContextEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() diff --git a/src/context/tests/test_link.py b/src/context/tests/test_link.py index e56a1889d..96021a449 100644 --- a/src/context/tests/test_link.py +++ b/src/context/tests/test_link.py @@ -12,14 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest, time -from common.proto.context_pb2 import ( - Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, Empty, EventTypeEnum, Link, LinkEvent, LinkId, - Topology, TopologyEvent, TopologyId) +import copy, grpc, pytest #, time +from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId +#from common.proto.context_pb2 import ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, TopologyEvent from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector +#from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Link import link_get_uuid -from .Constants import GET_EVENTS_TIMEOUT +#from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_NAME, TOPOLOGY, TOPOLOGY_ID) @@ -28,13 +27,13 @@ from .Objects import ( def test_link(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client, log_events_received=True, - activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, - activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False, - activate_connection_collector = False) - events_collector.start() - time.sleep(3) + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + # activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + #time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -50,20 +49,20 @@ def test_link(context_client : ContextClient) -> None: response = context_client.SetDevice(Device(**DEVICE_R2)) device_r2_uuid = response.device_uuid.uuid - events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == context_uuid - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[1].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == device_r2_uuid + #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[2], DeviceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[2].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[3].device_id.device_uuid.uuid == device_r2_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- link_id = LinkId(**LINK_R1_R2_ID) @@ -86,10 +85,10 @@ def test_link(context_client : ContextClient) -> None: assert response.link_uuid.uuid == link_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, LinkEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.link_id.link_uuid.uuid == link_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, LinkEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.link_id.link_uuid.uuid == link_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetLink(LinkId(**LINK_R1_R2_ID)) @@ -116,10 +115,10 @@ def test_link(context_client : ContextClient) -> None: assert response.link_uuid.uuid == link_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, LinkEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.link_id.link_uuid.uuid == link_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, LinkEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.link_id.link_uuid.uuid == link_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetLink(LinkId(**LINK_R1_R2_ID)) @@ -152,10 +151,10 @@ def test_link(context_client : ContextClient) -> None: context_client.RemoveLink(LinkId(**LINK_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, LinkEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.link_id.link_uuid.uuid == link_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, LinkEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.link_id.link_uuid.uuid == link_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.ListLinkIds(Empty()) @@ -178,20 +177,20 @@ def test_link(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], DeviceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[1], DeviceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].device_id.device_uuid.uuid == device_r2_uuid - assert isinstance(events[2], TopologyEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[2].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[3], ContextEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].context_id.context_uuid.uuid == context_uuid + #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], DeviceEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[1], DeviceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[2], TopologyEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[2].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[3], ContextEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() diff --git a/src/context/tests/test_service.py b/src/context/tests/test_service.py index ca02a4a91..0de7b49f2 100644 --- a/src/context/tests/test_service.py +++ b/src/context/tests/test_service.py @@ -12,14 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest, time +import copy, grpc, pytest #, time from common.proto.context_pb2 import ( - Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Service, ServiceEvent, ServiceId, - ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) + Context, ContextId, Device, DeviceId, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyId) +#from common.proto.context_pb2 import ( +# ContextEvent, DeviceEvent, EventTypeEnum, ServiceEvent, TopologyEvent) from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Service import service_get_uuid -from context.client.EventsCollector import EventsCollector -from .Constants import GET_EVENTS_TIMEOUT +#from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, SERVICE_R1_R2_NAME, DEVICE_R2, DEVICE_R2_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, TOPOLOGY, TOPOLOGY_ID) @@ -28,13 +29,13 @@ from .Objects import ( def test_service(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client, log_events_received=True, - activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, - activate_link_collector = True, activate_service_collector = True, activate_slice_collector = False, - activate_connection_collector = False) - events_collector.start() - time.sleep(3) + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + # activate_link_collector = True, activate_service_collector = True, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + #time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -50,20 +51,20 @@ def test_service(context_client : ContextClient) -> None: response = context_client.SetDevice(Device(**DEVICE_R2)) device_r2_uuid = response.device_uuid.uuid - events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == context_uuid - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[1].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == device_r2_uuid + #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[2], DeviceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[2].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[3].device_id.device_uuid.uuid == device_r2_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- service_id = ServiceId(**SERVICE_R1_R2_ID) @@ -103,11 +104,11 @@ def test_service(context_client : ContextClient) -> None: assert response.service_uuid.uuid == service_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ServiceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.service_id.context_id.context_uuid.uuid == context_uuid - assert event.service_id.service_uuid.uuid == service_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ServiceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.service_id.context_id.context_uuid.uuid == context_uuid + #assert event.service_id.service_uuid.uuid == service_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -156,11 +157,11 @@ def test_service(context_client : ContextClient) -> None: assert response.service_uuid.uuid == service_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ServiceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.service_id.context_id.context_uuid.uuid == context_uuid - assert event.service_id.service_uuid.uuid == service_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ServiceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.service_id.context_id.context_uuid.uuid == context_uuid + #assert event.service_id.service_uuid.uuid == service_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetService(ServiceId(**SERVICE_R1_R2_ID)) @@ -194,11 +195,11 @@ def test_service(context_client : ContextClient) -> None: context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ServiceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.service_id.context_id.context_uuid.uuid == context_uuid - assert event.service_id.service_uuid.uuid == service_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ServiceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.service_id.context_id.context_uuid.uuid == context_uuid + #assert event.service_id.service_uuid.uuid == service_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -218,20 +219,20 @@ def test_service(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], DeviceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[1], DeviceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].device_id.device_uuid.uuid == device_r2_uuid - assert isinstance(events[2], TopologyEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[2].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[3], ContextEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].context_id.context_uuid.uuid == context_uuid + #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], DeviceEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[1], DeviceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[2], TopologyEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[2].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[3], ContextEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() diff --git a/src/context/tests/test_slice.py b/src/context/tests/test_slice.py index 1008e7e91..22b2eeb89 100644 --- a/src/context/tests/test_slice.py +++ b/src/context/tests/test_slice.py @@ -14,12 +14,14 @@ import copy, grpc, pytest, time from common.proto.context_pb2 import ( - Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EventTypeEnum, Link, LinkEvent, LinkId, Service, - ServiceEvent, ServiceId, Slice, SliceEvent, SliceId, SliceStatusEnum, Topology, TopologyEvent, TopologyId) + Context, ContextId, Device, DeviceId, Link, LinkId, Service, ServiceId, Slice, SliceId, SliceStatusEnum, Topology, + TopologyId) +#from common.proto.context_pb2 import ( +# ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent) from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Slice import slice_get_uuid -from context.client.EventsCollector import EventsCollector -from .Constants import GET_EVENTS_TIMEOUT +#from .Constants import GET_EVENTS_TIMEOUT from .Objects import ( CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, @@ -29,13 +31,13 @@ from .Objects import ( def test_slice(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client, log_events_received=True, - activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, - activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True, - activate_connection_collector = False) - events_collector.start() - time.sleep(3) + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True, + # activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True, + # activate_connection_collector = False) + #events_collector.start() + #time.sleep(3) # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) @@ -71,40 +73,40 @@ def test_slice(context_client : ContextClient) -> None: assert response.context_id.context_uuid.uuid == context_uuid service_r2_r3_uuid = response.service_uuid.uuid - events = events_collector.get_events(block=True, count=10, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(events[0], ContextEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[0].context_id.context_uuid.uuid == context_uuid - assert isinstance(events[1], TopologyEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[1].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[2], DeviceEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[2].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[3], DeviceEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[3].device_id.device_uuid.uuid == device_r2_uuid - assert isinstance(events[4], DeviceEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[4].device_id.device_uuid.uuid == device_r3_uuid - assert isinstance(events[5], LinkEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[5].link_id.link_uuid.uuid == link_r1_r2_uuid - assert isinstance(events[6], LinkEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[6].link_id.link_uuid.uuid == link_r1_r3_uuid - assert isinstance(events[7], LinkEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[7].link_id.link_uuid.uuid == link_r2_r3_uuid - assert isinstance(events[8], ServiceEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[8].service_id.context_id.context_uuid.uuid == context_uuid - assert events[8].service_id.service_uuid.uuid == service_r1_r2_uuid - assert isinstance(events[9], ServiceEvent) - assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert events[9].service_id.context_id.context_uuid.uuid == context_uuid - assert events[9].service_id.service_uuid.uuid == service_r2_r3_uuid + #events = events_collector.get_events(block=True, count=10, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(events[0], ContextEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[0].context_id.context_uuid.uuid == context_uuid + #assert isinstance(events[1], TopologyEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[2], DeviceEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[2].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[3], DeviceEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[3].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[4], DeviceEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[4].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[5], LinkEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[5].link_id.link_uuid.uuid == link_r1_r2_uuid + #assert isinstance(events[6], LinkEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[6].link_id.link_uuid.uuid == link_r1_r3_uuid + #assert isinstance(events[7], LinkEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[7].link_id.link_uuid.uuid == link_r2_r3_uuid + #assert isinstance(events[8], ServiceEvent) + #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[8].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[8].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[9], ServiceEvent) + #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert events[9].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[9].service_id.service_uuid.uuid == service_r2_r3_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- slice_id = SliceId(**SLICE_R1_R3_ID) @@ -144,11 +146,11 @@ def test_slice(context_client : ContextClient) -> None: assert response.slice_uuid.uuid == slice_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, SliceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.slice_id.context_id.context_uuid.uuid == context_uuid - assert event.slice_id.slice_uuid.uuid == slice_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, SliceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.slice_id.context_id.context_uuid.uuid == context_uuid + #assert event.slice_id.slice_uuid.uuid == slice_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -195,11 +197,11 @@ def test_slice(context_client : ContextClient) -> None: assert response.slice_uuid.uuid == slice_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, SliceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.slice_id.context_id.context_uuid.uuid == context_uuid - assert event.slice_id.slice_uuid.uuid == slice_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, SliceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.slice_id.context_id.context_uuid.uuid == context_uuid + #assert event.slice_id.slice_uuid.uuid == slice_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetSlice(SliceId(**SLICE_R1_R3_ID)) @@ -231,11 +233,11 @@ def test_slice(context_client : ContextClient) -> None: context_client.RemoveSlice(SliceId(**SLICE_R1_R3_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, SliceEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.slice_id.context_id.context_uuid.uuid == context_uuid - assert event.slice_id.slice_uuid.uuid == slice_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, SliceEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.slice_id.context_id.context_uuid.uuid == context_uuid + #assert event.slice_id.slice_uuid.uuid == slice_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -261,40 +263,40 @@ def test_slice(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) context_client.RemoveContext(ContextId(**CONTEXT_ID)) - events = events_collector.get_events(block=True, count=10) - assert isinstance(events[0], ServiceEvent) - assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[0].service_id.context_id.context_uuid.uuid == context_uuid - assert events[0].service_id.service_uuid.uuid == service_r1_r2_uuid - assert isinstance(events[1], ServiceEvent) - assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[1].service_id.context_id.context_uuid.uuid == context_uuid - assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid - assert isinstance(events[2], LinkEvent) - assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[2].link_id.link_uuid.uuid == link_r1_r2_uuid - assert isinstance(events[3], LinkEvent) - assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[3].link_id.link_uuid.uuid == link_r1_r3_uuid - assert isinstance(events[4], LinkEvent) - assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[4].link_id.link_uuid.uuid == link_r2_r3_uuid - assert isinstance(events[5], DeviceEvent) - assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[5].device_id.device_uuid.uuid == device_r1_uuid - assert isinstance(events[6], DeviceEvent) - assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[6].device_id.device_uuid.uuid == device_r2_uuid - assert isinstance(events[7], DeviceEvent) - assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[7].device_id.device_uuid.uuid == device_r3_uuid - assert isinstance(events[8], TopologyEvent) - assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[8].topology_id.context_id.context_uuid.uuid == context_uuid - assert events[8].topology_id.topology_uuid.uuid == topology_uuid - assert isinstance(events[9], ContextEvent) - assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert events[9].context_id.context_uuid.uuid == context_uuid + #events = events_collector.get_events(block=True, count=10) + #assert isinstance(events[0], ServiceEvent) + #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[0].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[0].service_id.service_uuid.uuid == service_r1_r2_uuid + #assert isinstance(events[1], ServiceEvent) + #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[1].service_id.context_id.context_uuid.uuid == context_uuid + #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid + #assert isinstance(events[2], LinkEvent) + #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[2].link_id.link_uuid.uuid == link_r1_r2_uuid + #assert isinstance(events[3], LinkEvent) + #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[3].link_id.link_uuid.uuid == link_r1_r3_uuid + #assert isinstance(events[4], LinkEvent) + #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[4].link_id.link_uuid.uuid == link_r2_r3_uuid + #assert isinstance(events[5], DeviceEvent) + #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[5].device_id.device_uuid.uuid == device_r1_uuid + #assert isinstance(events[6], DeviceEvent) + #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[6].device_id.device_uuid.uuid == device_r2_uuid + #assert isinstance(events[7], DeviceEvent) + #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[7].device_id.device_uuid.uuid == device_r3_uuid + #assert isinstance(events[8], TopologyEvent) + #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[8].topology_id.context_id.context_uuid.uuid == context_uuid + #assert events[8].topology_id.topology_uuid.uuid == topology_uuid + #assert isinstance(events[9], ContextEvent) + #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert events[9].context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py index 0d8b8c027..a2afd9643 100644 --- a/src/context/tests/test_topology.py +++ b/src/context/tests/test_topology.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, pytest, time +import copy, grpc, pytest #, time from common.proto.context_pb2 import ( Context, ContextEvent, ContextId, EventTypeEnum, Topology, TopologyEvent, TopologyId) from context.client.ContextClient import ContextClient +#from context.client.EventsCollector import EventsCollector from context.service.database.uuids.Topology import topology_get_uuid -from context.client.EventsCollector import EventsCollector from .Constants import GET_EVENTS_TIMEOUT from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, TOPOLOGY_NAME @@ -25,22 +25,22 @@ from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, T def test_topology(context_client : ContextClient) -> None: # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- - events_collector = EventsCollector( - context_client, log_events_received=True, - activate_context_collector = True, activate_topology_collector = True, activate_device_collector = False, - activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, - activate_connection_collector = False) - events_collector.start() - time.sleep(3) # wait for the events collector to start + #events_collector = EventsCollector( + # context_client, log_events_received=True, + # activate_context_collector = True, activate_topology_collector = True, activate_device_collector = False, + # activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False, + # activate_connection_collector = False) + #events_collector.start() + #time.sleep(3) # wait for the events collector to start # ----- Prepare dependencies for the test and capture related events ----------------------------------------------- response = context_client.SetContext(Context(**CONTEXT)) context_uuid = response.context_uuid.uuid - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.context_id.context_uuid.uuid == context_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.context_id.context_uuid.uuid == context_uuid # ----- Get when the object does not exist ------------------------------------------------------------------------- topology_id = TopologyId(**TOPOLOGY_ID) @@ -69,11 +69,11 @@ def test_topology(context_client : ContextClient) -> None: assert response.topology_uuid.uuid == topology_uuid # ----- Check create event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, TopologyEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE - assert event.topology_id.context_id.context_uuid.uuid == context_uuid - assert event.topology_id.topology_uuid.uuid == topology_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE + #assert event.topology_id.context_id.context_uuid.uuid == context_uuid + #assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object exists --------------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -115,11 +115,11 @@ def test_topology(context_client : ContextClient) -> None: assert response.topology_uuid.uuid == topology_uuid # ----- Check update event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, TopologyEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE - assert event.topology_id.context_id.context_uuid.uuid == context_uuid - assert event.topology_id.topology_uuid.uuid == topology_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE + #assert event.topology_id.context_id.context_uuid.uuid == context_uuid + #assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- Get when the object is modified ---------------------------------------------------------------------------- response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID)) @@ -147,11 +147,11 @@ def test_topology(context_client : ContextClient) -> None: context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID)) # ----- Check remove event ----------------------------------------------------------------------------------------- - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, TopologyEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.topology_id.context_id.context_uuid.uuid == context_uuid - assert event.topology_id.topology_uuid.uuid == topology_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, TopologyEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.topology_id.context_id.context_uuid.uuid == context_uuid + #assert event.topology_id.topology_uuid.uuid == topology_uuid # ----- List after deleting the object ----------------------------------------------------------------------------- response = context_client.GetContext(ContextId(**CONTEXT_ID)) @@ -168,10 +168,10 @@ def test_topology(context_client : ContextClient) -> None: # ----- Clean dependencies used in the test and capture related events --------------------------------------------- context_client.RemoveContext(ContextId(**CONTEXT_ID)) - event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) - assert isinstance(event, ContextEvent) - assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE - assert event.context_id.context_uuid.uuid == context_uuid + #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT) + #assert isinstance(event, ContextEvent) + #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + #assert event.context_id.context_uuid.uuid == context_uuid # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() + #events_collector.stop() -- GitLab From efb6b5038d8ad51f8e828fa0c63b3aa19a6f5f52 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 15:31:22 +0000 Subject: [PATCH 062/158] Context component: - updated manifest file --- manifests/contextservice.yaml | 41 +++++++++++++++-------------------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index bdf012278..74955dc6f 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -20,7 +20,7 @@ spec: selector: matchLabels: app: contextservice - replicas: 1 + replicas: 5 template: metadata: labels: @@ -28,33 +28,30 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: redis - image: redis:6.2 - ports: - - containerPort: 6379 - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 500m - memory: 1024Mi + #- name: redis + # image: redis:6.2 + # ports: + # - containerPort: 6379 + # resources: + # requests: + # cpu: 100m + # memory: 128Mi + # limits: + # cpu: 500m + # memory: 1024Mi - name: server image: registry.gitlab.com/teraflow-h2020/controller/context:latest imagePullPolicy: Always ports: - containerPort: 1010 - - containerPort: 8080 - containerPort: 9192 env: - - name: CCDB_URL - value: "cockroachdb://tfs:tfs123@10.1.7.195:26257/tfs?sslmode=require" - - name: DB_BACKEND - value: "redis" + - name: CRDB_URI + value: "cockroachdb://tfs:tfs123@cockroachdb-public.crdb.svc.cluster.local:26257/tfs?sslmode=require" - name: MB_BACKEND - value: "redis" - - name: REDIS_DATABASE_ID - value: "0" + value: "inmemory" + #- name: NATS_URI + # value: "nats://tfs:tfs123@nats-public.nats.svc.cluster.local:4222" - name: LOG_LEVEL value: "INFO" readinessProbe: @@ -86,10 +83,6 @@ spec: protocol: TCP port: 1010 targetPort: 1010 - - name: http - protocol: TCP - port: 8080 - targetPort: 8080 - name: metrics protocol: TCP port: 9192 -- GitLab From f468b10423eb424a83846804c4528df5a5d3c7bc Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 15:43:45 +0000 Subject: [PATCH 063/158] Device component: - updated definition of constant DEFAULT_CONTEXT_UUID to DEFAULT_CONTEXT_NAME - updated definition of constant DEFAULT_TOPOLOGY_UUID to DEFAULT_TOPOLOGY_NAME --- src/device/tests/CommonObjects.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/device/tests/CommonObjects.py b/src/device/tests/CommonObjects.py index 61f0b44cd..5613d22b2 100644 --- a/src/device/tests/CommonObjects.py +++ b/src/device/tests/CommonObjects.py @@ -12,19 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Topology import json_topology, json_topology_id # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) # ----- KPI Sample Types ----------------------------------------------------------------------------------------------- -- GitLab From 53b95b8ef60d560ecc6f76645472fe98358a8554 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 15:44:00 +0000 Subject: [PATCH 064/158] Service component: - updated definition of constant DEFAULT_CONTEXT_UUID to DEFAULT_CONTEXT_NAME - updated definition of constant DEFAULT_TOPOLOGY_UUID to DEFAULT_TOPOLOGY_NAME --- src/service/tests/CommonObjects.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/service/tests/CommonObjects.py b/src/service/tests/CommonObjects.py index 7792ad61d..b84846ca4 100644 --- a/src/service/tests/CommonObjects.py +++ b/src/service/tests/CommonObjects.py @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Topology import json_topology, json_topology_id # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) # ----- Monitoring Samples --------------------------------------------------------------------------------------------- PACKET_PORT_SAMPLE_TYPES = [ -- GitLab From 3b82181e93760f2f74abdb2eb926d08cf0026c30 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 15:44:37 +0000 Subject: [PATCH 065/158] PathComp component: - updated definition of constant DEFAULT_CONTEXT_UUID to DEFAULT_CONTEXT_NAME - updated definition of constant DEFAULT_TOPOLOGY_UUID to DEFAULT_TOPOLOGY_NAME - updated definition of constant INTERDOMAIN_TOPOLOGY_UUID to INTERDOMAIN_TOPOLOGY_NAME --- .../frontend/service/PathCompServiceServicerImpl.py | 8 ++++---- .../service/algorithms/tools/ComposeRequest.py | 12 ++++++------ src/pathcomp/frontend/tests/Objects_A_B_C.py | 8 ++++---- src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py | 8 ++++---- .../frontend/tests/Objects_DC_CSGW_TN_OLS.py | 8 ++++---- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py index ca4132754..9f4cd7333 100644 --- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py +++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py @@ -13,7 +13,7 @@ # limitations under the License. import grpc, logging, threading -from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.proto.context_pb2 import ContextId, Empty from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest @@ -30,7 +30,7 @@ LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('PathComp', 'RPC') -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) class PathCompServiceServicerImpl(PathCompServiceServicer): def __init__(self) -> None: @@ -45,8 +45,8 @@ class PathCompServiceServicerImpl(PathCompServiceServicer): context_client = ContextClient() if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids): - devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) - links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) + links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) else: # TODO: improve filtering of devices and links # TODO: add contexts, topologies, and membership of devices/links in topologies diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py index 17a7e74ef..0a424bf8b 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py @@ -14,7 +14,7 @@ import logging from typing import Dict -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.context_pb2 import Constraint, Device, EndPointId, Link, Service, ServiceId, TopologyId from common.tools.grpc.Tools import grpc_message_to_json_string from .ConstantsMappings import ( @@ -28,17 +28,17 @@ def compose_topology_id(topology_id : TopologyId) -> Dict: context_uuid = topology_id.context_id.context_uuid.uuid topology_uuid = topology_id.topology_uuid.uuid - if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID - if len(topology_uuid) == 0: topology_uuid = DEFAULT_TOPOLOGY_UUID + if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_NAME + if len(topology_uuid) == 0: topology_uuid = DEFAULT_TOPOLOGY_NAME return {'contextId': context_uuid, 'topology_uuid': topology_uuid} def compose_service_id(service_id : ServiceId) -> Dict: - # force context_uuid to be always DEFAULT_CONTEXT_UUID for simplicity + # force context_uuid to be always DEFAULT_CONTEXT_NAME for simplicity # for interdomain contexts are managed in a particular way #context_uuid = service_id.context_id.context_uuid.uuid - #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID - context_uuid = DEFAULT_CONTEXT_UUID + #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_NAME + context_uuid = DEFAULT_CONTEXT_NAME service_uuid = service_id.service_uuid.uuid return {'contextId': context_uuid, 'service_uuid': service_uuid} diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py index 510ebb674..2deab06f4 100644 --- a/src/pathcomp/frontend/tests/Objects_A_B_C.py +++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Constraint import json_constraint_custom from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id @@ -41,11 +41,11 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]): return service # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Domains -------------------------------------------------------------------------------------------------------- -TOPOLOGY_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPOLOGY_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME TOPOLOGY_ADMIN_ID = json_topology_id(TOPOLOGY_ADMIN_UUID, context_id=CONTEXT_ID) TOPOLOGY_ADMIN = json_topology(TOPOLOGY_ADMIN_UUID, context_id=CONTEXT_ID) diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py index 06e9bbbc7..33483267b 100644 --- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py +++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Constraint import json_constraint_custom from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( @@ -58,12 +58,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]): return service # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Domains -------------------------------------------------------------------------------------------------------- # Overall network topology -TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py index 99fd83ed9..1ff3ff595 100644 --- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py +++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py @@ -13,7 +13,7 @@ # limitations under the License. import uuid -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Constraint import json_constraint_custom from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( @@ -68,12 +68,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]): return service # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Domains -------------------------------------------------------------------------------------------------------- # Overall network topology -TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) -- GitLab From a3349a3a8637d9945c7d204104e95832375fdeb8 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 15:48:01 +0000 Subject: [PATCH 066/158] Common - Tools - Context Queries: - updated definition of constant DEFAULT_CONTEXT_UUID to DEFAULT_CONTEXT_NAME - updated definition of constant DEFAULT_TOPOLOGY_UUID to DEFAULT_TOPOLOGY_NAME - updated definition of constant INTERDOMAIN_TOPOLOGY_UUID to INTERDOMAIN_TOPOLOGY_NAME --- .../tools/context_queries/InterDomain.py | 24 +++++++++---------- src/common/tools/context_queries/Service.py | 4 ++-- src/common/tools/context_queries/Slice.py | 4 ++-- src/common/tools/context_queries/Topology.py | 4 ++-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py index 0a202ccd8..ab804145d 100644 --- a/src/common/tools/context_queries/InterDomain.py +++ b/src/common/tools/context_queries/InterDomain.py @@ -14,7 +14,7 @@ import logging from typing import Dict, List, Set, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice from common.proto.pathcomp_pb2 import PathCompRequest @@ -28,7 +28,7 @@ from pathcomp.frontend.client.PathCompClient import PathCompClient LOGGER = logging.getLogger(__name__) -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) DATACENTER_DEVICE_TYPES = {DeviceTypeEnum.DATACENTER, DeviceTypeEnum.EMULATED_DATACENTER} def get_local_device_uuids(context_client : ContextClient) -> Set[str]: @@ -37,15 +37,15 @@ def get_local_device_uuids(context_client : ContextClient) -> Set[str]: LOGGER.info('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys()))) local_topology_uuids = set(topologies.keys()) - local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME) LOGGER.info('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids))) local_device_uuids = set() - # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are abstracted as a + # add topology names except DEFAULT_TOPOLOGY_NAME and INTERDOMAIN_TOPOLOGY_NAME; they are abstracted as a # local device in inter-domain and the name of the topology is used as abstract device name for local_topology_uuid in local_topology_uuids: - if local_topology_uuid == DEFAULT_TOPOLOGY_UUID: continue + if local_topology_uuid == DEFAULT_TOPOLOGY_NAME: continue local_device_uuids.add(local_topology_uuid) # add physical devices in the local topologies @@ -60,8 +60,8 @@ def get_local_device_uuids(context_client : ContextClient) -> Set[str]: return local_device_uuids def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]: - context_uuid = DEFAULT_CONTEXT_UUID - topology_uuid = INTERDOMAIN_TOPOLOGY_UUID + context_uuid = DEFAULT_CONTEXT_NAME + topology_uuid = INTERDOMAIN_TOPOLOGY_NAME interdomain_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid) if interdomain_topology is None: MSG = '[get_interdomain_device_uuids] {:s}/{:s} topology not found' @@ -186,13 +186,13 @@ def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]: context_id = context.context_id context_uuid = context_id.context_uuid.uuid topologies = context_client.ListTopologies(context_id) - if context_uuid == DEFAULT_CONTEXT_UUID: + if context_uuid == DEFAULT_CONTEXT_NAME: for topology in topologies.topologies: topology_id = topology.topology_id topology_uuid = topology_id.topology_uuid.uuid - if topology_uuid in {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}: continue + if topology_uuid in {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME}: continue - # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are + # add topology names except DEFAULT_TOPOLOGY_NAME and INTERDOMAIN_TOPOLOGY_NAME; they are # abstracted as a local device in inter-domain and the name of the topology is used as # abstract device name devices_to_domains[topology_uuid] = topology_uuid @@ -208,7 +208,7 @@ def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]: topology_uuid = topology_id.topology_uuid.uuid # if topology is not interdomain - if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: continue + if topology_uuid in {INTERDOMAIN_TOPOLOGY_NAME}: continue # add devices to the remote domain list for device_id in topology.device_ids: @@ -224,7 +224,7 @@ def compute_traversed_domains( local_device_uuids = get_local_device_uuids(context_client) LOGGER.info('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) - interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) interdomain_devices = { device.device_id.device_uuid.uuid : device for device in interdomain_devices diff --git a/src/common/tools/context_queries/Service.py b/src/common/tools/context_queries/Service.py index 15b201e73..b7ff4117b 100644 --- a/src/common/tools/context_queries/Service.py +++ b/src/common/tools/context_queries/Service.py @@ -14,14 +14,14 @@ import grpc, logging from typing import Optional -from common.Constants import DEFAULT_CONTEXT_UUID +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import Service, ServiceId from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) def get_service( - context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME, rw_copy : bool = False ) -> Optional[Service]: try: diff --git a/src/common/tools/context_queries/Slice.py b/src/common/tools/context_queries/Slice.py index 9f884aa94..550b2edaa 100644 --- a/src/common/tools/context_queries/Slice.py +++ b/src/common/tools/context_queries/Slice.py @@ -14,14 +14,14 @@ import grpc, logging from typing import Optional -from common.Constants import DEFAULT_CONTEXT_UUID +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import Slice, SliceId from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) def get_slice( - context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME, rw_copy : bool = False ) -> Optional[Slice]: try: diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py index 3d2077e96..619babffd 100644 --- a/src/common/tools/context_queries/Topology.py +++ b/src/common/tools/context_queries/Topology.py @@ -14,7 +14,7 @@ import grpc, logging from typing import List, Optional -from common.Constants import DEFAULT_CONTEXT_UUID +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, Topology, TopologyId from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Topology import json_topology @@ -45,7 +45,7 @@ def create_missing_topologies( context_client.SetTopology(grpc_topology) def get_topology( - context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME, rw_copy : bool = False ) -> Optional[Topology]: try: -- GitLab From 733dd98b053eb1531683491a04f4e817386a6f1a Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 16:19:07 +0000 Subject: [PATCH 067/158] WebUI component: - corrected selection of context/topology based on new uuid/name separation --- src/webui/service/device/routes.py | 2 +- src/webui/service/link/routes.py | 2 +- src/webui/service/main/routes.py | 54 +++++++++++++++++++----------- 3 files changed, 37 insertions(+), 21 deletions(-) diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py index b57c5735d..65b818b7a 100644 --- a/src/webui/service/device/routes.py +++ b/src/webui/service/device/routes.py @@ -29,7 +29,7 @@ device_client = DeviceClient() @device.get('/') def home(): - if 'context_topology_uuid' not in session: + if 'context_uuid' not in session or 'topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py index 5b8831b77..0bfe2b902 100644 --- a/src/webui/service/link/routes.py +++ b/src/webui/service/link/routes.py @@ -25,7 +25,7 @@ context_client = ContextClient() @link.get('/') def home(): - if 'context_topology_uuid' not in session: + if 'context_uuid' not in session or 'topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 0e0087347..3128cdad8 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, re +import base64, json, logging, re from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request -from common.proto.context_pb2 import Empty, ContextIdList, TopologyId, TopologyIdList +from common.proto.context_pb2 import ContextList, Empty, TopologyId, TopologyList from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id @@ -55,28 +55,44 @@ def home(): context_topology_form: ContextTopologyForm = ContextTopologyForm() context_topology_form.context_topology.choices.append(('', 'Select...')) - ctx_response: ContextIdList = context_client.ListContextIds(Empty()) - for context_id in ctx_response.context_ids: - context_uuid = context_id.context_uuid.uuid - topo_response: TopologyIdList = context_client.ListTopologyIds(context_id) - for topology_id in topo_response.topology_ids: - topology_uuid = topology_id.topology_uuid.uuid - context_topology_uuid = 'ctx[{:s}]/topo[{:s}]'.format(context_uuid, topology_uuid) - context_topology_name = 'Context({:s}):Topology({:s})'.format(context_uuid, topology_uuid) + contexts : ContextList = context_client.ListContexts(Empty()) + for context_ in contexts.contexts: + context_uuid : str = context_.context_id.context_uuid.uuid + context_name : str = context_.name + topologies : TopologyList = context_client.ListTopologies(context_.context_id) + for topology_ in topologies.topology_ids: + topology_uuid : str = topology_.topology_id.topology_uuid.uuid + topology_name : str = topology_.name + raw_values = context_uuid, context_name, topology_uuid, topology_name + b64_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in raw_values] + context_topology_uuid = ','.join(b64_values) + context_topology_name = 'Context({:s}):Topology({:s})'.format(context_name, topology_name) context_topology_entry = (context_topology_uuid, context_topology_name) context_topology_form.context_topology.choices.append(context_topology_entry) if context_topology_form.validate_on_submit(): context_topology_uuid = context_topology_form.context_topology.data if len(context_topology_uuid) > 0: - match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid) - if match is not None: - session['context_topology_uuid'] = context_topology_uuid = match.group(0) - session['context_uuid'] = context_uuid = match.group(1) - session['topology_uuid'] = topology_uuid = match.group(2) - MSG = f'Context({context_uuid})/Topology({topology_uuid}) successfully selected.' - flash(MSG, 'success') - return redirect(url_for("main.home")) + b64_values = context_topology_uuid.split(',') + raw_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in b64_values] + context_uuid, context_name, topology_uuid, topology_name = raw_values + session['context_topology_uuid'] = context_topology_uuid + session['context_uuid'] = context_uuid + session['context_name'] = context_name + session['topology_uuid'] = topology_uuid + session['topology_name'] = topology_name + MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.' + flash(MSG, 'success') + return redirect(url_for('main.home')) + + #match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid) + #if match is not None: + # session['context_topology_uuid'] = context_topology_uuid = match.group(0) + # session['context_uuid'] = context_uuid = match.group(1) + # session['topology_uuid'] = topology_uuid = match.group(2) + # MSG = f'Context({context_uuid})/Topology({topology_uuid}) successfully selected.' + # flash(MSG, 'success') + # return redirect(url_for('main.home')) if 'context_topology_uuid' in session: context_topology_form.context_topology.data = session['context_topology_uuid'] @@ -100,7 +116,7 @@ def home(): def topology(): context_client.connect() try: - if 'context_topology_uuid' not in session: + if 'context_uuid' not in session or 'topology_uuid' not in session: return jsonify({'devices': [], 'links': []}) context_uuid = session['context_uuid'] -- GitLab From 564e686d7fcaf3ac10d2ba042e7cb0cd792f9a5f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 16:31:28 +0000 Subject: [PATCH 068/158] Context component: - added default values to context/topology UUID generation functions - extended endpoint UUID generation function to allow default values for ontext/topology --- src/context/service/database/uuids/Context.py | 8 ++++++-- src/context/service/database/uuids/EndPoint.py | 2 +- src/context/service/database/uuids/Topology.py | 7 +++++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/context/service/database/uuids/Context.py b/src/context/service/database/uuids/Context.py index 1b798123e..aa62a9f48 100644 --- a/src/context/service/database/uuids/Context.py +++ b/src/context/service/database/uuids/Context.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random def context_get_uuid( - context_id : ContextId, context_name : str = '', allow_random : bool = False + context_id : ContextId, context_name : str = '', allow_random : bool = False, allow_default : bool = False ) -> str: context_uuid = context_id.context_uuid.uuid @@ -25,7 +26,10 @@ def context_get_uuid( return get_uuid_from_string(context_uuid) if len(context_name) > 0: return get_uuid_from_string(context_name) - if allow_random: return get_uuid_random() + if allow_default: + get_uuid_from_string(DEFAULT_CONTEXT_NAME) + if allow_random: + return get_uuid_random() raise InvalidArgumentsException([ ('context_id.context_uuid.uuid', context_uuid), diff --git a/src/context/service/database/uuids/EndPoint.py b/src/context/service/database/uuids/EndPoint.py index f257d1b41..3ceb39c4b 100644 --- a/src/context/service/database/uuids/EndPoint.py +++ b/src/context/service/database/uuids/EndPoint.py @@ -23,7 +23,7 @@ def endpoint_get_uuid( endpoint_id : EndPointId, endpoint_name : str = '', allow_random : bool = False ) -> Tuple[str, str, str]: device_uuid = device_get_uuid(endpoint_id.device_id, allow_random=False) - _,topology_uuid = topology_get_uuid(endpoint_id.topology_id, allow_random=False) + _,topology_uuid = topology_get_uuid(endpoint_id.topology_id, allow_random=False, allow_default=True) raw_endpoint_uuid = endpoint_id.endpoint_uuid.uuid if len(raw_endpoint_uuid) > 0: diff --git a/src/context/service/database/uuids/Topology.py b/src/context/service/database/uuids/Topology.py index e23f95238..86423b097 100644 --- a/src/context/service/database/uuids/Topology.py +++ b/src/context/service/database/uuids/Topology.py @@ -13,21 +13,24 @@ # limitations under the License. from typing import Tuple +from common.Constants import DEFAULT_TOPOLOGY_NAME from common.proto.context_pb2 import TopologyId from common.method_wrappers.ServiceExceptions import InvalidArgumentsException from ._Builder import get_uuid_from_string, get_uuid_random from .Context import context_get_uuid def topology_get_uuid( - topology_id : TopologyId, topology_name : str = '', allow_random : bool = False + topology_id : TopologyId, topology_name : str = '', allow_random : bool = False, allow_default : bool = False ) -> Tuple[str, str]: - context_uuid = context_get_uuid(topology_id.context_id, allow_random=False) + context_uuid = context_get_uuid(topology_id.context_id, allow_random=False, allow_default=allow_default) raw_topology_uuid = topology_id.topology_uuid.uuid if len(raw_topology_uuid) > 0: return context_uuid, get_uuid_from_string(raw_topology_uuid, prefix_for_name=context_uuid) if len(topology_name) > 0: return context_uuid, get_uuid_from_string(topology_name, prefix_for_name=context_uuid) + if allow_default: + return context_uuid, get_uuid_from_string(DEFAULT_TOPOLOGY_NAME) if allow_random: return context_uuid, get_uuid_random() -- GitLab From d9ad2e1423ef8621db677e03d0b80e719029ab43 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 16:37:01 +0000 Subject: [PATCH 069/158] WebUI component: - corrected retrieval of topologies --- src/webui/service/main/routes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 3128cdad8..8b5283f0f 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -60,7 +60,7 @@ def home(): context_uuid : str = context_.context_id.context_uuid.uuid context_name : str = context_.name topologies : TopologyList = context_client.ListTopologies(context_.context_id) - for topology_ in topologies.topology_ids: + for topology_ in topologies.topologies: topology_uuid : str = topology_.topology_id.topology_uuid.uuid topology_name : str = topology_.name raw_values = context_uuid, context_name, topology_uuid, topology_name -- GitLab From 374540e4b3e541c3f806a59996932018a5a0e870 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 16:39:37 +0000 Subject: [PATCH 070/158] WebUI component: - minor bug resolution --- src/webui/service/main/routes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 8b5283f0f..3fc4b7af8 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -64,7 +64,7 @@ def home(): topology_uuid : str = topology_.topology_id.topology_uuid.uuid topology_name : str = topology_.name raw_values = context_uuid, context_name, topology_uuid, topology_name - b64_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in raw_values] + b64_values = [base64.b64encode(v.encode('utf-8')).decode('utf-8') for v in raw_values] context_topology_uuid = ','.join(b64_values) context_topology_name = 'Context({:s}):Topology({:s})'.format(context_name, topology_name) context_topology_entry = (context_topology_uuid, context_topology_name) -- GitLab From 82fcc13461c23a2d780bc005d42e92671d2cbbe6 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 16:44:53 +0000 Subject: [PATCH 071/158] WebUI component: - corrected retrieval of context/topology names --- src/webui/service/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index d60cca659..7de1fdc28 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -19,10 +19,10 @@ from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient def get_working_context() -> str: - return session['context_uuid'] if 'context_uuid' in session else '---' + return session['context_name'] if 'context_name' in session else '---' def get_working_topology() -> str: - return session['topology_uuid'] if 'topology_uuid' in session else '---' + return session['topology_name'] if 'topology_name' in session else '---' def liveness(): pass -- GitLab From 73e0356b0a1ebe711572fe23c3824131e98906cc Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 16:45:13 +0000 Subject: [PATCH 072/158] Context component: - corrected generation of default uuid for context entities --- src/context/service/database/uuids/Context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/service/database/uuids/Context.py b/src/context/service/database/uuids/Context.py index aa62a9f48..16876d686 100644 --- a/src/context/service/database/uuids/Context.py +++ b/src/context/service/database/uuids/Context.py @@ -27,7 +27,7 @@ def context_get_uuid( if len(context_name) > 0: return get_uuid_from_string(context_name) if allow_default: - get_uuid_from_string(DEFAULT_CONTEXT_NAME) + return get_uuid_from_string(DEFAULT_CONTEXT_NAME) if allow_random: return get_uuid_random() -- GitLab From bd7f1d0acf5311e2942410d83acca34e704db7b3 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 17:11:54 +0000 Subject: [PATCH 073/158] WebUI component: - testing session management --- src/webui/service/main/routes.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 3fc4b7af8..33091890a 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -32,7 +32,7 @@ device_client = DeviceClient() service_client = ServiceClient() slice_client = SliceClient() -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) def process_descriptors(descriptors): try: @@ -74,7 +74,9 @@ def home(): context_topology_uuid = context_topology_form.context_topology.data if len(context_topology_uuid) > 0: b64_values = context_topology_uuid.split(',') + LOGGER.warning('b64_values={:s}'.format(str(b64_values))) raw_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in b64_values] + LOGGER.warning('raw_values={:s}'.format(str(raw_values))) context_uuid, context_name, topology_uuid, topology_name = raw_values session['context_topology_uuid'] = context_topology_uuid session['context_uuid'] = context_uuid @@ -103,7 +105,7 @@ def home(): process_descriptors(descriptor_form.descriptors) return redirect(url_for("main.home")) except Exception as e: # pylint: disable=broad-except - logger.exception('Descriptor load failed') + LOGGER.exception('Descriptor load failed') flash(f'Descriptor load failed: `{str(e)}`', 'danger') finally: context_client.close() @@ -144,7 +146,7 @@ def topology(): if link.link_id.link_uuid.uuid not in topo_link_uuids: continue if len(link.link_endpoint_ids) != 2: str_link = grpc_message_to_json_string(link) - logger.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link)) + LOGGER.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link)) continue links.append({ 'id': link.link_id.link_uuid.uuid, @@ -154,7 +156,7 @@ def topology(): return jsonify({'devices': devices, 'links': links}) except: - logger.exception('Error retrieving topology') + LOGGER.exception('Error retrieving topology') finally: context_client.close() -- GitLab From 7f5455104ddb2d6abae800b4e39bae69e9b564d6 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 17:19:43 +0000 Subject: [PATCH 074/158] WebUI component: - testing session management --- src/webui/service/main/routes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 33091890a..9f80981db 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -83,6 +83,7 @@ def home(): session['context_name'] = context_name session['topology_uuid'] = topology_uuid session['topology_name'] = topology_name + LOGGER.warning('session.items={:s}'.format(str(session.items()))) MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.' flash(MSG, 'success') return redirect(url_for('main.home')) -- GitLab From ee9d0247921d15c0f28942b9a024cae56368b9c7 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 17:28:18 +0000 Subject: [PATCH 075/158] WebUI component: - testing session management --- src/webui/service/main/routes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 9f80981db..eb7c87a91 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -78,6 +78,7 @@ def home(): raw_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in b64_values] LOGGER.warning('raw_values={:s}'.format(str(raw_values))) context_uuid, context_name, topology_uuid, topology_name = raw_values + session.clear() session['context_topology_uuid'] = context_topology_uuid session['context_uuid'] = context_uuid session['context_name'] = context_name -- GitLab From aeb9da43311f505711f21684c372ef0d73fa2f79 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 18:05:46 +0000 Subject: [PATCH 076/158] WebUI component: - testing session management --- src/webui/service/__init__.py | 4 ++-- src/webui/service/main/routes.py | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index 7de1fdc28..d60cca659 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -19,10 +19,10 @@ from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient def get_working_context() -> str: - return session['context_name'] if 'context_name' in session else '---' + return session['context_uuid'] if 'context_uuid' in session else '---' def get_working_topology() -> str: - return session['topology_name'] if 'topology_name' in session else '---' + return session['topology_uuid'] if 'topology_uuid' in session else '---' def liveness(): pass diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index eb7c87a91..209131737 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -63,7 +63,7 @@ def home(): for topology_ in topologies.topologies: topology_uuid : str = topology_.topology_id.topology_uuid.uuid topology_name : str = topology_.name - raw_values = context_uuid, context_name, topology_uuid, topology_name + raw_values = context_name, topology_name b64_values = [base64.b64encode(v.encode('utf-8')).decode('utf-8') for v in raw_values] context_topology_uuid = ','.join(b64_values) context_topology_name = 'Context({:s}):Topology({:s})'.format(context_name, topology_name) @@ -77,13 +77,13 @@ def home(): LOGGER.warning('b64_values={:s}'.format(str(b64_values))) raw_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in b64_values] LOGGER.warning('raw_values={:s}'.format(str(raw_values))) - context_uuid, context_name, topology_uuid, topology_name = raw_values - session.clear() + context_name, topology_name = raw_values + #session.clear() session['context_topology_uuid'] = context_topology_uuid - session['context_uuid'] = context_uuid - session['context_name'] = context_name - session['topology_uuid'] = topology_uuid - session['topology_name'] = topology_name + session['context_uuid'] = context_name + #session['context_name'] = context_name + session['topology_uuid'] = topology_name + #session['topology_name'] = topology_name LOGGER.warning('session.items={:s}'.format(str(session.items()))) MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.' flash(MSG, 'success') -- GitLab From c282c4712ea9008a3a124749ed24ff65bc7d8b73 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 18:11:50 +0000 Subject: [PATCH 077/158] WebUI component: - testing session management --- src/webui/service/main/routes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 209131737..52972104c 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -46,6 +46,7 @@ def process_descriptors(descriptors): descriptor_loader = DescriptorLoader(descriptors) results = descriptor_loader.process() for message,level in compose_notifications(results): + LOGGER.warning('notification level={:s} message={:s}'.format(str(level), str(message))) flash(message, level) @main.route('/', methods=['GET', 'POST']) -- GitLab From e087349ca0961d7d3d9e56190dd189aeb321dc31 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 18:19:02 +0000 Subject: [PATCH 078/158] Context component: - corrected generation of default uuid for topology entities --- src/context/service/database/uuids/Topology.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/context/service/database/uuids/Topology.py b/src/context/service/database/uuids/Topology.py index 86423b097..15387c9d6 100644 --- a/src/context/service/database/uuids/Topology.py +++ b/src/context/service/database/uuids/Topology.py @@ -30,7 +30,7 @@ def topology_get_uuid( if len(topology_name) > 0: return context_uuid, get_uuid_from_string(topology_name, prefix_for_name=context_uuid) if allow_default: - return context_uuid, get_uuid_from_string(DEFAULT_TOPOLOGY_NAME) + return context_uuid, get_uuid_from_string(DEFAULT_TOPOLOGY_NAME, prefix_for_name=context_uuid) if allow_random: return context_uuid, get_uuid_random() -- GitLab From 0ee137fb4c87f1e0b7d5071c38f69491dbb598ef Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 18:19:59 +0000 Subject: [PATCH 079/158] WebUI component: - testing session management --- src/webui/service/main/routes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 52972104c..61059fc95 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -46,7 +46,7 @@ def process_descriptors(descriptors): descriptor_loader = DescriptorLoader(descriptors) results = descriptor_loader.process() for message,level in compose_notifications(results): - LOGGER.warning('notification level={:s} message={:s}'.format(str(level), str(message))) + if level == 'error': LOGGER.warning('ERROR message={:s}'.format(str(message))) flash(message, level) @main.route('/', methods=['GET', 'POST']) -- GitLab From 53d4143da377f26642c0f6c6dacf61f01493b8a9 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 18:29:46 +0000 Subject: [PATCH 080/158] WebUI component: - improved name reporting of devices and links --- src/webui/service/templates/device/detail.html | 3 ++- src/webui/service/templates/device/home.html | 10 ++++++---- src/webui/service/templates/link/detail.html | 5 ++++- src/webui/service/templates/link/home.html | 14 ++++++++------ 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index 69ca93727..6b39e2217 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -17,7 +17,7 @@ {% extends 'base.html' %} {% block content %} - <h1>Device {{ device.device_id.device_uuid.uuid }}</h1> + <h1>Device {{ device.name }} ({{ device.device_id.device_uuid.uuid }})</h1> <div class="row mb-3"> <div class="col-sm-3"> @@ -44,6 +44,7 @@ <div class="row mb-3"> <div class="col-sm-4"> <b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br><br> + <b>Name: </b>{{ device.name }}<br><br> <b>Type: </b>{{ device.device_type }}<br><br> <b>Status: </b> {{ dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br> <b>Drivers: </b> diff --git a/src/webui/service/templates/device/home.html b/src/webui/service/templates/device/home.html index 2c108add9..7b4437cce 100644 --- a/src/webui/service/templates/device/home.html +++ b/src/webui/service/templates/device/home.html @@ -42,7 +42,8 @@ <table class="table table-striped table-hover"> <thead> <tr> - <th scope="col">#</th> + <th scope="col">UUID</th> + <th scope="col">Name</th> <th scope="col">Type</th> <th scope="col">Endpoints</th> <th scope="col">Drivers</th> @@ -56,9 +57,10 @@ {% for device in devices %} <tr> <td> - <!-- <a href="{{ url_for('device.detail', device_uuid=device.device_id.device_uuid.uuid) }}"> --> - {{ device.device_id.device_uuid.uuid }} - <!-- </a> --> + {{ device.device_id.device_uuid.uuid }} + </td> + <td> + {{ device.name }} </td> <td> {{ device.device_type }} diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html index 7df9ddce6..fc865a4b9 100644 --- a/src/webui/service/templates/link/detail.html +++ b/src/webui/service/templates/link/detail.html @@ -16,7 +16,7 @@ {% extends 'base.html' %} {% block content %} - <h1>Link {{ link.link_id.link_uuid.uuid }}</h1> + <h1>Link {{ link.name }} ({{ link.link_id.link_uuid.uuid }})</h1> <div class="row mb-3"> <div class="col-sm-3"> <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('link.home') }}'"> @@ -31,6 +31,9 @@ <div class="col-sm-4"> <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br><br> </div> + <div class="col-sm-4"> + <b>Name: </b>{{ link.name }}<br><br> + </div> <div class="col-sm-8"> <table class="table table-striped table-hover"> <thead> diff --git a/src/webui/service/templates/link/home.html b/src/webui/service/templates/link/home.html index 77d00d341..16fe36e1f 100644 --- a/src/webui/service/templates/link/home.html +++ b/src/webui/service/templates/link/home.html @@ -27,7 +27,7 @@ </a> --> </div> <div class="col"> - {{ links | length }} links found</i> + {{ links | length }} links found in context <i>{{ session['context_uuid'] }}</i> </div> <!-- <div class="col"> <form> @@ -42,7 +42,8 @@ <table class="table table-striped table-hover"> <thead> <tr> - <th scope="col">#</th> + <th scope="col">UUID</th> + <th scope="col">Name</th> <th scope="col">Endpoints</th> <th scope="col"></th> </tr> @@ -52,11 +53,12 @@ {% for link in links %} <tr> <td> - <!-- <a href="#"> --> - {{ link.link_id.link_uuid.uuid }} - <!-- </a> --> + {{ link.link_id.link_uuid.uuid }} </td> - + <td> + {{ link.name }} + </td> + <td> <ul> {% for end_point in link.link_endpoint_ids %} -- GitLab From 06b37d62c425a90e5d0471d243cbebedbfb726ad Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 19 Jan 2023 18:30:41 +0000 Subject: [PATCH 081/158] WebUI component: - removed unneeded log messages --- src/webui/service/main/routes.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 61059fc95..02706c858 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -58,11 +58,11 @@ def home(): contexts : ContextList = context_client.ListContexts(Empty()) for context_ in contexts.contexts: - context_uuid : str = context_.context_id.context_uuid.uuid + #context_uuid : str = context_.context_id.context_uuid.uuid context_name : str = context_.name topologies : TopologyList = context_client.ListTopologies(context_.context_id) for topology_ in topologies.topologies: - topology_uuid : str = topology_.topology_id.topology_uuid.uuid + #topology_uuid : str = topology_.topology_id.topology_uuid.uuid topology_name : str = topology_.name raw_values = context_name, topology_name b64_values = [base64.b64encode(v.encode('utf-8')).decode('utf-8') for v in raw_values] @@ -75,9 +75,7 @@ def home(): context_topology_uuid = context_topology_form.context_topology.data if len(context_topology_uuid) > 0: b64_values = context_topology_uuid.split(',') - LOGGER.warning('b64_values={:s}'.format(str(b64_values))) raw_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in b64_values] - LOGGER.warning('raw_values={:s}'.format(str(raw_values))) context_name, topology_name = raw_values #session.clear() session['context_topology_uuid'] = context_topology_uuid @@ -85,7 +83,6 @@ def home(): #session['context_name'] = context_name session['topology_uuid'] = topology_name #session['topology_name'] = topology_name - LOGGER.warning('session.items={:s}'.format(str(session.items()))) MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.' flash(MSG, 'success') return redirect(url_for('main.home')) -- GitLab From 25fc59a74d64adf8d21023e6a859678ae8e393a0 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 09:42:48 +0000 Subject: [PATCH 082/158] CockroachDB deployment: - updated README file --- manifests/cockroachdb/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/manifests/cockroachdb/README.md b/manifests/cockroachdb/README.md index b61e05f82..2d9a94910 100644 --- a/manifests/cockroachdb/README.md +++ b/manifests/cockroachdb/README.md @@ -11,13 +11,13 @@ kubectl apply -f "${DEPLOY_PATH}/crds.yaml" # Deploy CockroachDB Operator curl -o "${DEPLOY_PATH}/operator.yaml" "${OPERATOR_BASE_URL}/install/operator.yaml" -# edit "${DEPLOY_PATH}/operator.yaml" +nano "${DEPLOY_PATH}/operator.yaml" # - add env var: WATCH_NAMESPACE='crdb' kubectl apply -f "${DEPLOY_PATH}/operator.yaml" # Deploy CockroachDB curl -o "${DEPLOY_PATH}/cluster.yaml" "${OPERATOR_BASE_URL}/examples/example.yaml" -# edit "${DEPLOY_PATH}/cluster.yaml" +nano "${DEPLOY_PATH}/cluster.yaml" # - set version # - set number of replicas kubectl create namespace crdb -- GitLab From ecc3206998c9ad4b0e0804394f649b06f17f56a3 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 14:35:32 +0000 Subject: [PATCH 083/158] WebUI: - improved device details page --- src/webui/service/templates/device/detail.html | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index e49396c4f..db1d9a8ef 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -17,7 +17,7 @@ {% extends 'base.html' %} {% block content %} -<h1>Device {{ device.device_id.device_uuid.uuid }}</h1> +<h1>Device {{ device.name }} ({{ device.device_id.device_uuid.uuid }})</h1> <div class="row mb-3"> <div class="col-sm-3"> @@ -44,6 +44,7 @@ <div class="row mb-3"> <div class="col-sm-4"> <b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br><br> + <b>Name: </b>{{ device.name }}<br><br> <b>Type: </b>{{ device.device_type }}<br><br> <b>Status: </b> {{ dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br> <b>Drivers: </b> @@ -57,7 +58,8 @@ <table class="table table-striped table-hover"> <thead> <tr> - <th scope="col">Endpoints</th> + <th scope="col">Endpoint</th> + <th scope="col">Name</th> <th scope="col">Type</th> </tr> </thead> @@ -67,6 +69,9 @@ <td> {{ endpoint.endpoint_id.endpoint_uuid.uuid }} </td> + <td> + {{ endpoint.name }} + </td> <td> {{ endpoint.endpoint_type }} </td> -- GitLab From 173af90fd25bc5e162981f300284340bcd0089e2 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:06:33 +0000 Subject: [PATCH 084/158] Load Generator component: - Converted load_gen tool into a microservice - Added simple WebUI page to manage it - Created manifests, Dockerfile, etc. - Created proto file to manage the load generator --- .gitlab-ci.yml | 1 + manifests/load_generatorservice.yaml | 67 +++++++++++++++++ proto/load_generator.proto | 23 ++++++ src/common/Constants.py | 6 +- .../method_wrappers/tests/deploy_specs.sh | 2 +- src/load_generator/.gitlab-ci.yml | 39 ++++++++++ .../__init__.py => load_generator/Config.py} | 0 src/load_generator/Dockerfile | 71 +++++++++++++++++++ src/load_generator/README.md | 18 +++++ src/load_generator/__init__.py | 14 ++++ .../client/LoadGeneratorClient.py | 60 ++++++++++++++++ src/load_generator/client/__init__.py | 14 ++++ src/load_generator/command/__init__.py | 14 ++++ .../command}/__main__.py | 11 +-- .../load_gen/Constants.py | 0 .../load_gen/DltTools.py | 0 .../load_gen/Parameters.py | 0 .../load_gen/RequestGenerator.py | 2 +- .../load_gen/RequestScheduler.py | 9 ++- src/load_generator/load_gen/__init__.py | 14 ++++ src/load_generator/requirements.in | 15 ++++ .../tools/load_gen => load_generator}/run.sh | 4 +- .../service/LoadGeneratorService.py | 28 ++++++++ .../LoadGeneratorServiceServicerImpl.py | 63 ++++++++++++++++ src/load_generator/service/__init__.py | 14 ++++ src/load_generator/service/__main__.py | 64 +++++++++++++++++ src/load_generator/tests/__init__.py | 14 ++++ .../tests}/deploy_specs.sh | 2 +- .../tests}/descriptors.json | 0 .../tests}/test_dlt_functional.py | 0 src/webui/service/__init__.py | 3 + src/webui/service/load_gen/__init__.py | 14 ++++ src/webui/service/load_gen/routes.py | 45 ++++++++++++ src/webui/service/templates/main/debug.html | 33 +++++---- 34 files changed, 638 insertions(+), 26 deletions(-) create mode 100644 manifests/load_generatorservice.yaml create mode 100644 proto/load_generator.proto create mode 100644 src/load_generator/.gitlab-ci.yml rename src/{tests/tools/load_gen/__init__.py => load_generator/Config.py} (100%) create mode 100644 src/load_generator/Dockerfile create mode 100644 src/load_generator/README.md create mode 100644 src/load_generator/__init__.py create mode 100644 src/load_generator/client/LoadGeneratorClient.py create mode 100644 src/load_generator/client/__init__.py create mode 100644 src/load_generator/command/__init__.py rename src/{tests/tools/load_gen => load_generator/command}/__main__.py (80%) rename src/{tests/tools => load_generator}/load_gen/Constants.py (100%) rename src/{tests/tools => load_generator}/load_gen/DltTools.py (100%) rename src/{tests/tools => load_generator}/load_gen/Parameters.py (100%) rename src/{tests/tools => load_generator}/load_gen/RequestGenerator.py (99%) rename src/{tests/tools => load_generator}/load_gen/RequestScheduler.py (97%) create mode 100644 src/load_generator/load_gen/__init__.py create mode 100644 src/load_generator/requirements.in rename src/{tests/tools/load_gen => load_generator}/run.sh (90%) create mode 100644 src/load_generator/service/LoadGeneratorService.py create mode 100644 src/load_generator/service/LoadGeneratorServiceServicerImpl.py create mode 100644 src/load_generator/service/__init__.py create mode 100644 src/load_generator/service/__main__.py create mode 100644 src/load_generator/tests/__init__.py rename src/{tests/tools/load_gen => load_generator/tests}/deploy_specs.sh (95%) rename src/{tests/tools/load_gen => load_generator/tests}/descriptors.json (100%) rename src/{tests/tools/load_gen => load_generator/tests}/test_dlt_functional.py (100%) create mode 100644 src/webui/service/load_gen/__init__.py create mode 100644 src/webui/service/load_gen/routes.py diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8e26a1644..316a38f23 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -43,3 +43,4 @@ include: #- local: '/src/interdomain/.gitlab-ci.yml' - local: '/src/pathcomp/.gitlab-ci.yml' #- local: '/src/dlt/.gitlab-ci.yml' + - local: '/src/load_generator/.gitlab-ci.yml' diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml new file mode 100644 index 000000000..88b1fa397 --- /dev/null +++ b/manifests/load_generatorservice.yaml @@ -0,0 +1,67 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: load-generatorservice +spec: + selector: + matchLabels: + app: load-generatorservice + replicas: 1 + template: + metadata: + labels: + app: load-generatorservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: server + image: registry.gitlab.com/teraflow-h2020/controller/load_generator:latest + imagePullPolicy: Always + ports: + - containerPort: 50052 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50052"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50052"] + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: load-generatorservice + labels: + app: load-generatorservice +spec: + type: ClusterIP + selector: + app: load-generatorservice + ports: + - name: grpc + protocol: TCP + port: 50052 + targetPort: 50052 diff --git a/proto/load_generator.proto b/proto/load_generator.proto new file mode 100644 index 000000000..00ddb254c --- /dev/null +++ b/proto/load_generator.proto @@ -0,0 +1,23 @@ +// Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package load_generator; + +import "context.proto"; + +service LoadGeneratorService { + rpc Start(context.Empty) returns (context.Empty) {} + rpc Stop (context.Empty) returns (context.Empty) {} +} diff --git a/src/common/Constants.py b/src/common/Constants.py index bdbde21b2..bd403c084 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -54,7 +54,8 @@ class ServiceNameEnum(Enum): WEBUI = 'webui' # Used for test and debugging only - DLT_GATEWAY = 'dltgateway' + DLT_GATEWAY = 'dltgateway' + LOAD_GENERATOR = 'load_generator' # Default gRPC service ports DEFAULT_SERVICE_GRPC_PORTS = { @@ -72,7 +73,8 @@ DEFAULT_SERVICE_GRPC_PORTS = { ServiceNameEnum.PATHCOMP .value : 10020, # Used for test and debugging only - ServiceNameEnum.DLT_GATEWAY .value : 50051, + ServiceNameEnum.DLT_GATEWAY .value : 50051, + ServiceNameEnum.LOAD_GENERATOR.value : 50052, } # Default HTTP/REST-API service ports diff --git a/src/common/method_wrappers/tests/deploy_specs.sh b/src/common/method_wrappers/tests/deploy_specs.sh index 238918480..ab90ab3d3 100644 --- a/src/common/method_wrappers/tests/deploy_specs.sh +++ b/src/common/method_wrappers/tests/deploy_specs.sh @@ -7,7 +7,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # interdomain slice pathcomp dlt # dbscanserving opticalattackmitigator opticalattackdetector # l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector -export TFS_COMPONENTS="context device pathcomp service slice webui" # automation monitoring compute +export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" diff --git a/src/load_generator/.gitlab-ci.yml b/src/load_generator/.gitlab-ci.yml new file mode 100644 index 000000000..a63bd8d0d --- /dev/null +++ b/src/load_generator/.gitlab-ci.yml @@ -0,0 +1,39 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build load_generator: + variables: + IMAGE_NAME: 'load_generator' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile . + - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml diff --git a/src/tests/tools/load_gen/__init__.py b/src/load_generator/Config.py similarity index 100% rename from src/tests/tools/load_gen/__init__.py rename to src/load_generator/Config.py diff --git a/src/load_generator/Dockerfile b/src/load_generator/Dockerfile new file mode 100644 index 000000000..2e5427a34 --- /dev/null +++ b/src/load_generator/Dockerfile @@ -0,0 +1,71 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/load_generator +WORKDIR /var/teraflow/load_generator +COPY src/load_generator/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/context/. context/ +COPY src/dlt/. dlt/ +COPY src/service/. service/ +COPY src/slice/. slice/ + +# Start the service +ENTRYPOINT ["python", "-m", "load_generator.service"] diff --git a/src/load_generator/README.md b/src/load_generator/README.md new file mode 100644 index 000000000..e6b0397bf --- /dev/null +++ b/src/load_generator/README.md @@ -0,0 +1,18 @@ +# Tool: Load Generator + +Simple tool to generate load in ETSI TeraFlowSDN controller with requests for creating services and slices. +The tool can be executed form command line or from WebUI interface. + +## Example (Command Line): + +Deploy TeraFlowSDN controller with your specific settings: +```(bash) +cd ~/tfs-ctrl +source my_deploy.sh +./deploy.sh +``` + +Run the tool: +```(bash) +./src/load_generator/run.sh +``` diff --git a/src/load_generator/__init__.py b/src/load_generator/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/load_generator/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/load_generator/client/LoadGeneratorClient.py b/src/load_generator/client/LoadGeneratorClient.py new file mode 100644 index 000000000..d7e215802 --- /dev/null +++ b/src/load_generator/client/LoadGeneratorClient.py @@ -0,0 +1,60 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc +from common.proto.context_pb2 import Empty +from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class LoadGeneratorClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.LOAD_GENERATOR) + if not port: port = get_service_port_grpc(ServiceNameEnum.LOAD_GENERATOR) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = LoadGeneratorServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def Start(self, request : Empty) -> Empty: + LOGGER.debug('Start request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.Start(request) + LOGGER.debug('Start result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def Stop(self, request : Empty) -> Empty: + LOGGER.debug('Stop request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.Stop(request) + LOGGER.debug('Stop result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/load_generator/client/__init__.py b/src/load_generator/client/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/load_generator/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/load_generator/command/__init__.py b/src/load_generator/command/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/load_generator/command/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/load_gen/__main__.py b/src/load_generator/command/__main__.py similarity index 80% rename from src/tests/tools/load_gen/__main__.py rename to src/load_generator/command/__main__.py index 9a5ea2b69..9f61fc080 100644 --- a/src/tests/tools/load_gen/__main__.py +++ b/src/load_generator/command/__main__.py @@ -13,10 +13,11 @@ # limitations under the License. import logging, sys -from .Constants import RequestType -from .Parameters import Parameters -from .RequestGenerator import RequestGenerator -from .RequestScheduler import RequestScheduler +from apscheduler.schedulers.blocking import BlockingScheduler +from load_generator.load_gen.Constants import RequestType +from load_generator.load_gen.Parameters import Parameters +from load_generator.load_gen.RequestGenerator import RequestGenerator +from load_generator.load_gen.RequestScheduler import RequestScheduler logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) @@ -45,7 +46,7 @@ def main(): generator.initialize() LOGGER.info('Running Schedule...') - scheduler = RequestScheduler(parameters, generator) + scheduler = RequestScheduler(parameters, generator, scheduler_class=BlockingScheduler) scheduler.start() LOGGER.info('Done!') diff --git a/src/tests/tools/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py similarity index 100% rename from src/tests/tools/load_gen/Constants.py rename to src/load_generator/load_gen/Constants.py diff --git a/src/tests/tools/load_gen/DltTools.py b/src/load_generator/load_gen/DltTools.py similarity index 100% rename from src/tests/tools/load_gen/DltTools.py rename to src/load_generator/load_gen/DltTools.py diff --git a/src/tests/tools/load_gen/Parameters.py b/src/load_generator/load_gen/Parameters.py similarity index 100% rename from src/tests/tools/load_gen/Parameters.py rename to src/load_generator/load_gen/Parameters.py diff --git a/src/tests/tools/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py similarity index 99% rename from src/tests/tools/load_gen/RequestGenerator.py rename to src/load_generator/load_gen/RequestGenerator.py index d38291d38..e983f90dc 100644 --- a/src/tests/tools/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -25,8 +25,8 @@ from common.tools.object_factory.Slice import json_slice from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient from dlt.connector.client.DltConnectorClient import DltConnectorClient -from tests.tools.load_gen.DltTools import record_device_to_dlt, record_link_to_dlt from .Constants import ENDPOINT_COMPATIBILITY, RequestType +from .DltTools import record_device_to_dlt, record_link_to_dlt from .Parameters import Parameters LOGGER = logging.getLogger(__name__) diff --git a/src/tests/tools/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py similarity index 97% rename from src/tests/tools/load_gen/RequestScheduler.py rename to src/load_generator/load_gen/RequestScheduler.py index eafb95c30..408e0125f 100644 --- a/src/tests/tools/load_gen/RequestScheduler.py +++ b/src/load_generator/load_gen/RequestScheduler.py @@ -31,8 +31,10 @@ logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) LOGGER = logging.getLogger(__name__) class RequestScheduler: - def __init__(self, parameters : Parameters, generator : RequestGenerator) -> None: - self._scheduler = BlockingScheduler() + def __init__( + self, parameters : Parameters, generator : RequestGenerator, scheduler_class=BlockingScheduler + ) -> None: + self._scheduler = scheduler_class() self._scheduler.configure( jobstores = {'default': MemoryJobStore()}, executors = {'default': ThreadPoolExecutor(max_workers=10)}, @@ -65,6 +67,9 @@ class RequestScheduler: self._schedule_request_setup() self._scheduler.start() + def stop(self): + self._scheduler.shutdown() + def _request_setup(self) -> None: self._schedule_request_setup() diff --git a/src/load_generator/load_gen/__init__.py b/src/load_generator/load_gen/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/load_generator/load_gen/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/load_generator/requirements.in b/src/load_generator/requirements.in new file mode 100644 index 000000000..61a0a0efb --- /dev/null +++ b/src/load_generator/requirements.in @@ -0,0 +1,15 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +APScheduler==3.8.1 diff --git a/src/tests/tools/load_gen/run.sh b/src/load_generator/run.sh similarity index 90% rename from src/tests/tools/load_gen/run.sh rename to src/load_generator/run.sh index b16808ab6..35db1ad4d 100755 --- a/src/tests/tools/load_gen/run.sh +++ b/src/load_generator/run.sh @@ -13,5 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Use this script to run standalone + source tfs_runtime_env_vars.sh -python -m tests.tools.load_gen +python -m load_generator.command diff --git a/src/load_generator/service/LoadGeneratorService.py b/src/load_generator/service/LoadGeneratorService.py new file mode 100644 index 000000000..0127e5f86 --- /dev/null +++ b/src/load_generator/service/LoadGeneratorService.py @@ -0,0 +1,28 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.proto.load_generator_pb2_grpc import add_LoadGeneratorServiceServicer_to_server +from common.tools.service.GenericGrpcService import GenericGrpcService +from .LoadGeneratorServiceServicerImpl import LoadGeneratorServiceServicerImpl + +class LoadGeneratorService(GenericGrpcService): + def __init__(self, cls_name: str = __name__) -> None: + port = get_service_port_grpc(ServiceNameEnum.LOAD_GENERATOR) + super().__init__(port, cls_name=cls_name) + self.load_generator_servicer = LoadGeneratorServiceServicerImpl() + + def install_servicers(self): + add_LoadGeneratorServiceServicer_to_server(self.load_generator_servicer, self.server) diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py new file mode 100644 index 000000000..1fa653394 --- /dev/null +++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py @@ -0,0 +1,63 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional +import grpc, logging +from apscheduler.schedulers.background import BackgroundScheduler +from common.proto.context_pb2 import Empty +from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer +from load_generator.load_gen.Constants import RequestType +from load_generator.load_gen.Parameters import Parameters +from load_generator.load_gen.RequestGenerator import RequestGenerator +from load_generator.load_gen.RequestScheduler import RequestScheduler + +LOGGER = logging.getLogger(__name__) + +class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): + def __init__(self): + LOGGER.debug('Creating Servicer...') + self._parameters = Parameters( + num_requests = 100, + request_types = [ + RequestType.SERVICE_L2NM, + RequestType.SERVICE_L3NM, + #RequestType.SERVICE_MW, + #RequestType.SERVICE_TAPI, + RequestType.SLICE_L2NM, + RequestType.SLICE_L3NM, + ], + offered_load = 50, + holding_time = 10, + dry_mode = False, # in dry mode, no request is sent to TeraFlowSDN + record_to_dlt = False, # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT + dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False + ) + self._generator : Optional[RequestGenerator] = None + self._scheduler : Optional[RequestScheduler] = None + LOGGER.debug('Servicer Created') + + def Start(self, request : Empty, context : grpc.ServicerContext) -> Empty: + LOGGER.info('Initializing Generator...') + self._generator = RequestGenerator(self._parameters) + self._generator.initialize() + + LOGGER.info('Running Schedule...') + self._scheduler = RequestScheduler(self._parameters, self._generator, scheduler_class=BackgroundScheduler) + self._scheduler.start() + return Empty() + + def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty: + if self._scheduler is not None: + self._scheduler.stop() + return Empty() diff --git a/src/load_generator/service/__init__.py b/src/load_generator/service/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/load_generator/service/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/load_generator/service/__main__.py b/src/load_generator/service/__main__.py new file mode 100644 index 000000000..0f49ee244 --- /dev/null +++ b/src/load_generator/service/__main__.py @@ -0,0 +1,64 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, + wait_for_environment_variables) +from .LoadGeneratorService import LoadGeneratorService + +log_level = get_log_level() +logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") +LOGGER = logging.getLogger(__name__) + +terminate = threading.Event() + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + wait_for_environment_variables([ + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + ]) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Starting load generator service + grpc_service = LoadGeneratorService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=0.1): pass + + scheduler = grpc_service.load_generator_servicer._scheduler + if scheduler is not None: scheduler.stop() + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/load_generator/tests/__init__.py b/src/load_generator/tests/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/load_generator/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/load_gen/deploy_specs.sh b/src/load_generator/tests/deploy_specs.sh similarity index 95% rename from src/tests/tools/load_gen/deploy_specs.sh rename to src/load_generator/tests/deploy_specs.sh index a688f1c0a..a5af70b04 100644 --- a/src/tests/tools/load_gen/deploy_specs.sh +++ b/src/load_generator/tests/deploy_specs.sh @@ -7,7 +7,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # interdomain slice pathcomp dlt # dbscanserving opticalattackmitigator opticalattackdetector # l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector -export TFS_COMPONENTS="context device pathcomp service slice webui" # automation monitoring compute dlt +export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute dlt # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" diff --git a/src/tests/tools/load_gen/descriptors.json b/src/load_generator/tests/descriptors.json similarity index 100% rename from src/tests/tools/load_gen/descriptors.json rename to src/load_generator/tests/descriptors.json diff --git a/src/tests/tools/load_gen/test_dlt_functional.py b/src/load_generator/tests/test_dlt_functional.py similarity index 100% rename from src/tests/tools/load_gen/test_dlt_functional.py rename to src/load_generator/tests/test_dlt_functional.py diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index d60cca659..84a43d370 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -69,6 +69,9 @@ def create_app(use_config=None, web_app_root=None): from webui.service.main.routes import main app.register_blueprint(main) + from webui.service.load_gen.routes import load_gen + app.register_blueprint(load_gen) + from webui.service.service.routes import service app.register_blueprint(service) diff --git a/src/webui/service/load_gen/__init__.py b/src/webui/service/load_gen/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/webui/service/load_gen/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py new file mode 100644 index 000000000..fc091f3b4 --- /dev/null +++ b/src/webui/service/load_gen/routes.py @@ -0,0 +1,45 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from flask import render_template, Blueprint, flash +from common.proto.context_pb2 import Empty +from load_generator.client.LoadGeneratorClient import LoadGeneratorClient + +load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen') + +@load_gen.route('start', methods=['GET']) +def start(): + load_gen_client = LoadGeneratorClient() + try: + load_gen_client.connect() + load_gen_client.Start(Empty()) + load_gen_client.close() + flash('Load Generator Started.', 'success') + except Exception as e: # pylint: disable=broad-except + flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger') + + return render_template('main/debug.html') + +@load_gen.route('stop', methods=['GET']) +def stop(): + load_gen_client = LoadGeneratorClient() + try: + load_gen_client.connect() + load_gen_client.Stop(Empty()) + load_gen_client.close() + flash('Load Generator Stoped.', 'success') + except Exception as e: # pylint: disable=broad-except + flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger') + + return render_template('main/debug.html') diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html index d065cc49d..4b3e289c3 100644 --- a/src/webui/service/templates/main/debug.html +++ b/src/webui/service/templates/main/debug.html @@ -19,18 +19,25 @@ {% block content %} <h1>Debug</h1> - <h3>Dump ContextDB:</h3> - <ul> - <li> - <a class="nav-link" href="/context/api/dump/html" id="context_html_link" target="context_html"> - as HTML - </a> - </li> - <li> - <a class="nav-link" href="/context/api/dump/text" id="context_text_link" target="context_text"> - as Text - </a> - </li> - </ul> + <!-- + <h3>Dump ContextDB:</h3> + <ul> + <li> + <a class="nav-link" href="/context/api/dump/html" id="context_html_link" target="context_html"> + as HTML + </a> + </li> + <li> + <a class="nav-link" href="/context/api/dump/text" id="context_text_link" target="context_text"> + as Text + </a> + </li> + </ul> + --> + + <h3>Load Generator:</h3> + <a href="{{ url_for('load_gen.run') }}" class="btn btn-primary" style="margin-bottom: 10px;"> + Run + </a> {% endblock %} -- GitLab From 0462edf6c12cf77b00728dc8973b2130097092ba Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:17:28 +0000 Subject: [PATCH 085/158] WebUI component: - added dependencies for load generator --- src/common/method_wrappers/tests/deploy_specs.sh | 2 +- src/webui/Dockerfile | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/common/method_wrappers/tests/deploy_specs.sh b/src/common/method_wrappers/tests/deploy_specs.sh index ab90ab3d3..a5af70b04 100644 --- a/src/common/method_wrappers/tests/deploy_specs.sh +++ b/src/common/method_wrappers/tests/deploy_specs.sh @@ -7,7 +7,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # interdomain slice pathcomp dlt # dbscanserving opticalattackmitigator opticalattackdetector # l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector -export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute +export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute dlt # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile index a17d2bd9a..f0ab35629 100644 --- a/src/webui/Dockerfile +++ b/src/webui/Dockerfile @@ -77,6 +77,8 @@ COPY --chown=webui:webui src/context/__init__.py context/__init__.py COPY --chown=webui:webui src/context/client/. context/client/ COPY --chown=webui:webui src/device/__init__.py device/__init__.py COPY --chown=webui:webui src/device/client/. device/client/ +COPY --chown=webui:webui src/load_generator/__init__.py load_generator/__init__.py +COPY --chown=webui:webui src/load_generator/client/. load_generator/client/ COPY --chown=webui:webui src/service/__init__.py service/__init__.py COPY --chown=webui:webui src/service/client/. service/client/ COPY --chown=webui:webui src/slice/__init__.py slice/__init__.py -- GitLab From fea31d305a7370efaa8de8c9c36c2c30c9e40dd0 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:23:12 +0000 Subject: [PATCH 086/158] Load Generator component: - Corrected Dockerfile --- src/load_generator/Dockerfile | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/load_generator/Dockerfile b/src/load_generator/Dockerfile index 2e5427a34..8f59bb4db 100644 --- a/src/load_generator/Dockerfile +++ b/src/load_generator/Dockerfile @@ -62,10 +62,16 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory WORKDIR /var/teraflow -COPY src/context/. context/ -COPY src/dlt/. dlt/ -COPY src/service/. service/ -COPY src/slice/. slice/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/dlt/__init__.py dlt/__init__.py +COPY src/dlt/connector/__init__.py dlt/connector/__init__.py +COPY src/dlt/connector/. dlt/connector/ +COPY src/load_generator/. load_generator/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. service/client/ +COPY src/slice/__init__.py slice/__init__.py +COPY src/slice/client/. slice/client/ # Start the service ENTRYPOINT ["python", "-m", "load_generator.service"] -- GitLab From 3066dea07d7f62056d4a506d06399e600fc389c3 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:26:33 +0000 Subject: [PATCH 087/158] WebUI component: - corrected load generator commands --- src/webui/service/templates/main/debug.html | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html index 4b3e289c3..1ab3be251 100644 --- a/src/webui/service/templates/main/debug.html +++ b/src/webui/service/templates/main/debug.html @@ -36,8 +36,7 @@ --> <h3>Load Generator:</h3> - <a href="{{ url_for('load_gen.run') }}" class="btn btn-primary" style="margin-bottom: 10px;"> - Run - </a> + <a href="{{ url_for('load_gen.start') }}" class="btn btn-primary" style="margin-bottom: 10px;">Start</a> + <a href="{{ url_for('load_gen.stop') }}" class="btn btn-primary" style="margin-bottom: 10px;">Stop</a> {% endblock %} -- GitLab From 7ffa0dcfb0495bc30bb31f9c7f960758a6d1f814 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:32:06 +0000 Subject: [PATCH 088/158] Common: - corrected service name of load generator --- src/common/Constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/Constants.py b/src/common/Constants.py index bd403c084..0c3afe43c 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -55,7 +55,7 @@ class ServiceNameEnum(Enum): # Used for test and debugging only DLT_GATEWAY = 'dltgateway' - LOAD_GENERATOR = 'load_generator' + LOAD_GENERATOR = 'load-generator' # Default gRPC service ports DEFAULT_SERVICE_GRPC_PORTS = { -- GitLab From c14a8d3e4565900552c4b773cb73e48014575feb Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:33:03 +0000 Subject: [PATCH 089/158] WebUI component: - deactivated Grafana pannel --- manifests/webuiservice.yaml | 80 ++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index 7f70e837c..dd8004ad8 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -60,43 +60,43 @@ spec: limits: cpu: 700m memory: 1024Mi - - name: grafana - image: grafana/grafana:8.5.11 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 3000 - name: http-grafana - protocol: TCP - env: - - name: GF_SERVER_ROOT_URL - value: "http://0.0.0.0:3000/grafana/" - - name: GF_SERVER_SERVE_FROM_SUB_PATH - value: "true" - readinessProbe: - failureThreshold: 3 - httpGet: - path: /robots.txt - port: 3000 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 2 - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - tcpSocket: - port: 3000 - timeoutSeconds: 1 - resources: - requests: - cpu: 250m - memory: 750Mi - limits: - cpu: 700m - memory: 1024Mi + #- name: grafana + # image: grafana/grafana:8.5.11 + # imagePullPolicy: IfNotPresent + # ports: + # - containerPort: 3000 + # name: http-grafana + # protocol: TCP + # env: + # - name: GF_SERVER_ROOT_URL + # value: "http://0.0.0.0:3000/grafana/" + # - name: GF_SERVER_SERVE_FROM_SUB_PATH + # value: "true" + # readinessProbe: + # failureThreshold: 3 + # httpGet: + # path: /robots.txt + # port: 3000 + # scheme: HTTP + # initialDelaySeconds: 10 + # periodSeconds: 30 + # successThreshold: 1 + # timeoutSeconds: 2 + # livenessProbe: + # failureThreshold: 3 + # initialDelaySeconds: 30 + # periodSeconds: 10 + # successThreshold: 1 + # tcpSocket: + # port: 3000 + # timeoutSeconds: 1 + # resources: + # requests: + # cpu: 250m + # memory: 750Mi + # limits: + # cpu: 700m + # memory: 1024Mi --- apiVersion: v1 kind: Service @@ -110,6 +110,6 @@ spec: - name: webui port: 8004 targetPort: 8004 - - name: grafana - port: 3000 - targetPort: 3000 + #- name: grafana + # port: 3000 + # targetPort: 3000 -- GitLab From 3eca13f7a6b724513bd438d381c7117681e36f01 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:51:32 +0000 Subject: [PATCH 090/158] Load Generator component: - corrected device name retrieval - corrected composition of router_ids and and IP addresses - minor code formatting --- .../load_gen/RequestGenerator.py | 191 +++++++++++------- 1 file changed, 117 insertions(+), 74 deletions(-) diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index e983f90dc..b7b1432f4 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -15,6 +15,7 @@ import logging, json, random, threading from typing import Dict, Optional, Set, Tuple from common.proto.context_pb2 import Empty, TopologyId +from common.tools.grpc.Tools import grpc_message_to_json from common.tools.object_factory.Constraint import json_constraint_custom from common.tools.object_factory.ConfigRule import json_config_rule_set from common.tools.object_factory.Device import json_device_id @@ -41,6 +42,9 @@ class RequestGenerator: self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict() self._endpoint_types_to_ids : Dict[str, Set[Tuple[str, str]]] = dict() + self._device_data : Dict[str, Dict] = dict() + self._device_endpoint_data : Dict[str, Dict[str, Dict]] = dict() + def initialize(self) -> None: with self._lock: self._available_device_endpoints.clear() @@ -55,9 +59,14 @@ class RequestGenerator: devices = context_client.ListDevices(Empty()) for device in devices.devices: device_uuid = device.device_id.device_uuid.uuid + self._device_data[device_uuid] = grpc_message_to_json(device) + _endpoints = self._available_device_endpoints.setdefault(device_uuid, set()) for endpoint in device.device_endpoints: endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoints = self._device_endpoint_data.setdefault(device_uuid, dict()) + endpoints[endpoint_uuid] = grpc_message_to_json(endpoint) + endpoint_type = endpoint.endpoint_type _endpoints.add(endpoint_uuid) self._endpoint_ids_to_types.setdefault((device_uuid, endpoint_uuid), endpoint_type) @@ -191,7 +200,8 @@ class RequestGenerator: dst_endpoint_types = {dst_endpoint_type} if request_type in {RequestType.SERVICE_TAPI} else None # identify excluded destination devices - exclude_device_uuids = {} if request_type in {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} else {src_device_uuid} + REQUESTTYPES_REUSING_DEVICES = {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} + exclude_device_uuids = {} if request_type in REQUESTTYPES_REUSING_DEVICES else {src_device_uuid} # choose feasible destination endpoint dst = self._use_device_endpoint( @@ -218,26 +228,33 @@ class RequestGenerator: ] vlan_id = num_request % 1000 circuit_id = '{:03d}'.format(vlan_id) - src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) - dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) + + src_device_name = self._device_data[src_device_uuid]['name'] + src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', ''))) + + dst_device_name = self._device_data[dst_device_uuid]['name'] + dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', ''))) + config_rules = [ json_config_rule_set('/settings', { 'mtu': 1512 }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { - 'router_id': src_router_id, - 'sub_interface_index': vlan_id, - 'vlan_id': vlan_id, - 'remote_router': dst_router_id, - 'circuit_id': circuit_id, - }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { - 'router_id': dst_router_id, - 'sub_interface_index': vlan_id, - 'vlan_id': vlan_id, - 'remote_router': src_router_id, - 'circuit_id': circuit_id, - }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { + 'router_id': src_router_id, + 'sub_interface_index': vlan_id, + 'vlan_id': vlan_id, + 'remote_router': dst_router_id, + 'circuit_id': circuit_id, + }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { + 'router_id': dst_router_id, + 'sub_interface_index': vlan_id, + 'vlan_id': vlan_id, + 'remote_router': src_router_id, + 'circuit_id': circuit_id, + }), ] return json_service_l2nm_planned( request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) @@ -251,32 +268,41 @@ class RequestGenerator: bgp_as = 60000 + (num_request % 10000) bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333) route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id) - src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) - dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) - src_address_ip = '.'.join([src_device_uuid.replace('R', ''), '0'] + src_endpoint_uuid.split('/')) - dst_address_ip = '.'.join([dst_device_uuid.replace('R', ''), '0'] + dst_endpoint_uuid.split('/')) + + src_device_name = self._device_data[src_device_uuid]['name'] + src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name'] + src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', ''))) + src_address_ip = '.'.join([src_device_name.replace('R', ''), '0'] + src_endpoint_name.split('/')) + + dst_device_name = self._device_data[dst_device_uuid]['name'] + dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name'] + dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', ''))) + dst_address_ip = '.'.join([dst_device_name.replace('R', ''), '0'] + dst_endpoint_name.split('/')) + config_rules = [ json_config_rule_set('/settings', { 'mtu' : 1512, 'bgp_as' : bgp_as, 'bgp_route_target': bgp_route_target, }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { - 'router_id' : src_router_id, - 'route_distinguisher': route_distinguisher, - 'sub_interface_index': vlan_id, - 'vlan_id' : vlan_id, - 'address_ip' : src_address_ip, - 'address_prefix' : 16, - }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { - 'router_id' : dst_router_id, - 'route_distinguisher': route_distinguisher, - 'sub_interface_index': vlan_id, - 'vlan_id' : vlan_id, - 'address_ip' : dst_address_ip, - 'address_prefix' : 16, - }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { + 'router_id' : src_router_id, + 'route_distinguisher': route_distinguisher, + 'sub_interface_index': vlan_id, + 'vlan_id' : vlan_id, + 'address_ip' : src_address_ip, + 'address_prefix' : 16, + }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { + 'router_id' : dst_router_id, + 'route_distinguisher': route_distinguisher, + 'sub_interface_index': vlan_id, + 'vlan_id' : vlan_id, + 'address_ip' : dst_address_ip, + 'address_prefix' : 16, + }), ] return json_service_l3nm_planned( request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) @@ -313,7 +339,8 @@ class RequestGenerator: src_device_uuid,src_endpoint_uuid = src # identify excluded destination devices - exclude_device_uuids = {} if request_type in {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} else {src_device_uuid} + REQUESTTYPES_REUSING_DEVICES = {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} + exclude_device_uuids = {} if request_type in REQUESTTYPES_REUSING_DEVICES else {src_device_uuid} # choose feasible destination endpoint dst = self._use_device_endpoint(request_uuid, request_type, exclude_device_uuids=exclude_device_uuids) @@ -338,26 +365,33 @@ class RequestGenerator: if request_type == RequestType.SLICE_L2NM: vlan_id = num_request % 1000 circuit_id = '{:03d}'.format(vlan_id) - src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) - dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) + + src_device_name = self._device_data[src_device_uuid]['name'] + src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', ''))) + + dst_device_name = self._device_data[dst_device_uuid]['name'] + dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', ''))) + config_rules = [ json_config_rule_set('/settings', { 'mtu': 1512 }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { - 'router_id': src_router_id, - 'sub_interface_index': vlan_id, - 'vlan_id': vlan_id, - 'remote_router': dst_router_id, - 'circuit_id': circuit_id, - }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { - 'router_id': dst_router_id, - 'sub_interface_index': vlan_id, - 'vlan_id': vlan_id, - 'remote_router': src_router_id, - 'circuit_id': circuit_id, - }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { + 'router_id': src_router_id, + 'sub_interface_index': vlan_id, + 'vlan_id': vlan_id, + 'remote_router': dst_router_id, + 'circuit_id': circuit_id, + }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { + 'router_id': dst_router_id, + 'sub_interface_index': vlan_id, + 'vlan_id': vlan_id, + 'remote_router': src_router_id, + 'circuit_id': circuit_id, + }), ] elif request_type == RequestType.SLICE_L3NM: @@ -365,32 +399,41 @@ class RequestGenerator: bgp_as = 60000 + (num_request % 10000) bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333) route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id) - src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) - dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', ''))) - src_address_ip = '.'.join([src_device_uuid.replace('R', ''), '0'] + src_endpoint_uuid.split('/')) - dst_address_ip = '.'.join([dst_device_uuid.replace('R', ''), '0'] + dst_endpoint_uuid.split('/')) + + src_device_name = self._device_data[src_device_uuid]['name'] + src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name'] + src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', ''))) + src_address_ip = '.'.join([src_device_name.replace('R', ''), '0'] + src_endpoint_name.split('/')) + + dst_device_name = self._device_data[dst_device_uuid]['name'] + dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name'] + dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', ''))) + dst_address_ip = '.'.join([dst_device_name.replace('R', ''), '0'] + dst_endpoint_name.split('/')) + config_rules = [ json_config_rule_set('/settings', { 'mtu' : 1512, 'bgp_as' : bgp_as, 'bgp_route_target': bgp_route_target, }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { - 'router_id' : src_router_id, - 'route_distinguisher': route_distinguisher, - 'sub_interface_index': vlan_id, - 'vlan_id' : vlan_id, - 'address_ip' : src_address_ip, - 'address_prefix' : 16, - }), - json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { - 'router_id' : dst_router_id, - 'route_distinguisher': route_distinguisher, - 'sub_interface_index': vlan_id, - 'vlan_id' : vlan_id, - 'address_ip' : dst_address_ip, - 'address_prefix' : 16, - }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { + 'router_id' : src_router_id, + 'route_distinguisher': route_distinguisher, + 'sub_interface_index': vlan_id, + 'vlan_id' : vlan_id, + 'address_ip' : src_address_ip, + 'address_prefix' : 16, + }), + json_config_rule_set( + '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { + 'router_id' : dst_router_id, + 'route_distinguisher': route_distinguisher, + 'sub_interface_index': vlan_id, + 'vlan_id' : vlan_id, + 'address_ip' : dst_address_ip, + 'address_prefix' : 16, + }), ] return json_slice( -- GitLab From 558a3f00d047300c7a366f5439c4a5aa7fbf240d Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 16:59:00 +0000 Subject: [PATCH 091/158] Context component: - corrected SetService when no endpoints are provided --- src/context/service/database/Service.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py index b65010fed..76a830535 100644 --- a/src/context/service/database/Service.py +++ b/src/context/service/database/Service.py @@ -118,11 +118,12 @@ def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]: created_at,updated_at = session.execute(stmt).fetchone() updated = updated_at > created_at - stmt = insert(ServiceEndPointModel).values(service_endpoints_data) - stmt = stmt.on_conflict_do_nothing( - index_elements=[ServiceEndPointModel.service_uuid, ServiceEndPointModel.endpoint_uuid] - ) - session.execute(stmt) + if len(service_endpoints_data) > 0: + stmt = insert(ServiceEndPointModel).values(service_endpoints_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[ServiceEndPointModel.service_uuid, ServiceEndPointModel.endpoint_uuid] + ) + session.execute(stmt) constraint_updates = upsert_constraints(session, constraints, service_uuid=service_uuid) updated = updated or any([(updated_at > created_at) for created_at,updated_at in constraint_updates]) -- GitLab From 6de7468fee10244f6ae9ee41583cc1f394fd14f3 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 17:12:33 +0000 Subject: [PATCH 092/158] Common Object Factory: - added context "admin" as default for slices --- src/common/tools/object_factory/Slice.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/common/tools/object_factory/Slice.py b/src/common/tools/object_factory/Slice.py index 6ab666aa6..970b12ad9 100644 --- a/src/common/tools/object_factory/Slice.py +++ b/src/common/tools/object_factory/Slice.py @@ -14,7 +14,9 @@ import copy from typing import Dict, List, Optional +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import SliceStatusEnum +from common.tools.object_factory.Context import json_context_id def get_slice_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: return 'slc:{:s}/{:s}=={:s}/{:s}'.format( @@ -35,6 +37,8 @@ def json_slice( constraints : List[Dict] = [], config_rules : List[Dict] = [], service_ids : List[Dict] = [], subslice_ids : List[Dict] = [], owner : Optional[Dict] = None): + if context_id is None: context_id = json_context_id(DEFAULT_CONTEXT_NAME) + result = { 'slice_id' : json_slice_id(slice_uuid, context_id=context_id), 'slice_status' : {'slice_status': status}, -- GitLab From 20d8340fe46bee8722f0e069bf2109eb92036e3d Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 17:26:18 +0000 Subject: [PATCH 093/158] Compute/InterDomain/Slice components: - removed check of reply.entity_id==request.entity_id; context computes UUID-like identifiers based from requested identifiers for performance reasons --- .../nbi_plugins/ietf_l2vpn/L2VPN_Services.py | 4 +--- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 4 +--- .../service/InterdomainServiceServicerImpl.py | 10 ++++------ .../_old_code/InterdomainServiceServicerImpl.py | 12 +++++------- src/slice/service/SliceServiceServicerImpl.py | 11 +++-------- 5 files changed, 14 insertions(+), 27 deletions(-) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py index f27d852f0..248b99896 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py @@ -50,9 +50,7 @@ class L2VPN_Services(Resource): slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED slice_client = SliceClient() - slice_reply = slice_client.CreateSlice(slice_request) - if slice_reply != slice_request.slice_id: # pylint: disable=no-member - raise Exception('Slice creation failed. Wrong Slice Id was returned') + slice_client.CreateSlice(slice_request) response = jsonify({}) response.status_code = HTTP_CREATED diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 819d8995d..0b8305ed7 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -129,9 +129,7 @@ def process_list_site_network_access( sna_request = process_site_network_access(context_client, site_id, site_network_access) LOGGER.debug('sna_request = {:s}'.format(grpc_message_to_json_string(sna_request))) try: - sna_reply = slice_client.UpdateSlice(sna_request) - if sna_reply != sna_request.slice_id: # pylint: disable=no-member - raise Exception('Slice update failed. Wrong Slice Id was returned') + slice_client.UpdateSlice(sna_request) except Exception as e: # pylint: disable=broad-except msg = 'Something went wrong Updating VPN {:s}' LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request))) diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py index c0c351451..6844393fe 100644 --- a/src/interdomain/service/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/InterdomainServiceServicerImpl.py @@ -13,7 +13,7 @@ # limitations under the License. import grpc, logging, uuid -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method @@ -95,17 +95,15 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): LOGGER.info('[loop] [local] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( str(domain_uuid), str(is_local_domain), str(slice_uuid))) - # local slices always in DEFAULT_CONTEXT_UUID + # local slices always in DEFAULT_CONTEXT_NAME #context_uuid = request.slice_id.context_id.context_uuid.uuid - context_uuid = DEFAULT_CONTEXT_UUID + context_uuid = DEFAULT_CONTEXT_NAME endpoint_ids = map_abstract_endpoints_to_real(context_client, domain_uuid, endpoint_ids) sub_slice = compose_slice( context_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, config_rules=request.slice_config.config_rules) LOGGER.info('[loop] [local] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) sub_slice_id = slice_client.CreateSlice(sub_slice) - if sub_slice_id != sub_slice.slice_id: # pylint: disable=no-member - raise Exception('Local Slice creation failed. Wrong Slice Id was returned') else: slice_uuid = request.slice_id.slice_uuid.uuid LOGGER.info('[loop] [remote] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( @@ -113,7 +111,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): # create context/topology for the remote domains where we are creating slices create_context(context_client, domain_uuid) - create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID) + create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_NAME) sub_slice = compose_slice( domain_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, diff --git a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py index f38185781..00c0b8d77 100644 --- a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py @@ -108,16 +108,14 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' slice_endpoint_id.endpoint_uuid.uuid = '2/1' - local_slice_reply = slice_client.CreateSlice(local_slice_request) - if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member - raise Exception('Local Slice creation failed. Wrong Slice Id was returned') + local_slice_id_reply = slice_client.CreateSlice(local_slice_request) subslice_id = reply.slice_subslice_ids.add() - subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid - subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid + subslice_id.context_id.context_uuid.uuid = local_slice_id_reply.context_id.context_uuid.uuid + subslice_id.slice_uuid.uuid = local_slice_id_reply.slice_uuid.uuid - context_client.SetSlice(reply) - return reply.slice_id + reply_slice_id = context_client.SetSlice(reply) + return reply_slice_id @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index aa41a77ac..d693abd8f 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -81,7 +81,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): service_id = ServiceId() # pylint: disable=no-member context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid + service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid service_client = ServiceClient() try: @@ -92,10 +92,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): service_request.service_id.CopyFrom(service_id) service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED - service_reply = service_client.CreateService(service_request) - if service_reply != service_request.service_id: # pylint: disable=no-member - # pylint: disable=raise-missing-from - raise Exception('Service creation failed. Wrong Service Id was returned') + service_client.CreateService(service_request) _service = context_client.GetService(service_id) service_request = Service() service_request.CopyFrom(_service) @@ -137,9 +134,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM LOGGER.info('assume L2') - service_reply = service_client.UpdateService(service_request) - if service_reply != service_request.service_id: # pylint: disable=no-member - raise Exception('Service update failed. Wrong Service Id was returned') + service_client.UpdateService(service_request) copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) copy_constraints(request.slice_constraints, slice_request.slice_constraints) -- GitLab From 2460257e9eb03fe2e1718f4441a3332ce1208ff5 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 17:51:47 +0000 Subject: [PATCH 094/158] Context component: - corrected slice_unset query filters --- src/context/service/database/Slice.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index b0b83238c..717cac9cf 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -13,7 +13,6 @@ # limitations under the License. import datetime, logging -from sqlalchemy import and_ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker @@ -203,20 +202,20 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: def callback(session : Session) -> bool: num_deletes = 0 num_deletes += session.query(SliceServiceModel)\ - .filter_by(and_( + .filter_by( SliceServiceModel.slice_uuid == slice_uuid, SliceServiceModel.service_uuid.in_(slice_service_uuids) - )).delete() + ).delete() num_deletes += session.query(SliceSubSliceModel)\ - .filter_by(and_( + .filter_by( SliceSubSliceModel.slice_uuid == slice_uuid, SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) - )).delete() + ).delete() num_deletes += session.query(SliceEndPointModel)\ - .filter_by(and_( + .filter_by( SliceEndPointModel.slice_uuid == slice_uuid, SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids) - )).delete() + ).delete() return num_deletes > 0 updated = run_transaction(sessionmaker(bind=db_engine), callback) -- GitLab From e75818fc4fd6edc0423110bb90a3b96d58fc9a66 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 18:03:32 +0000 Subject: [PATCH 095/158] Context component: - corrected slice_unset query filters --- src/context/service/database/Slice.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index 717cac9cf..4c65886ab 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -13,6 +13,7 @@ # limitations under the License. import datetime, logging +from sqlalchemy import and_ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker @@ -202,20 +203,20 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: def callback(session : Session) -> bool: num_deletes = 0 num_deletes += session.query(SliceServiceModel)\ - .filter_by( + .filter(and_( SliceServiceModel.slice_uuid == slice_uuid, SliceServiceModel.service_uuid.in_(slice_service_uuids) - ).delete() + )).delete() num_deletes += session.query(SliceSubSliceModel)\ - .filter_by( + .filter_by(and_( SliceSubSliceModel.slice_uuid == slice_uuid, SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) - ).delete() + )).delete() num_deletes += session.query(SliceEndPointModel)\ - .filter_by( + .filter_by(and_( SliceEndPointModel.slice_uuid == slice_uuid, SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids) - ).delete() + )).delete() return num_deletes > 0 updated = run_transaction(sessionmaker(bind=db_engine), callback) -- GitLab From 2325c4d10fe4654413905530f1873c6a80d1c47a Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Fri, 20 Jan 2023 18:10:09 +0000 Subject: [PATCH 096/158] Context component: - corrected slice_unset query filters --- src/context/service/database/Slice.py | 33 +++++++++++++++------------ 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py index 4c65886ab..84bfff343 100644 --- a/src/context/service/database/Slice.py +++ b/src/context/service/database/Slice.py @@ -202,21 +202,24 @@ def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]: def callback(session : Session) -> bool: num_deletes = 0 - num_deletes += session.query(SliceServiceModel)\ - .filter(and_( - SliceServiceModel.slice_uuid == slice_uuid, - SliceServiceModel.service_uuid.in_(slice_service_uuids) - )).delete() - num_deletes += session.query(SliceSubSliceModel)\ - .filter_by(and_( - SliceSubSliceModel.slice_uuid == slice_uuid, - SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) - )).delete() - num_deletes += session.query(SliceEndPointModel)\ - .filter_by(and_( - SliceEndPointModel.slice_uuid == slice_uuid, - SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids) - )).delete() + if len(slice_service_uuids) > 0: + num_deletes += session.query(SliceServiceModel)\ + .filter(and_( + SliceServiceModel.slice_uuid == slice_uuid, + SliceServiceModel.service_uuid.in_(slice_service_uuids) + )).delete() + if len(slice_subslice_uuids) > 0: + num_deletes += session.query(SliceSubSliceModel)\ + .filter_by(and_( + SliceSubSliceModel.slice_uuid == slice_uuid, + SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids) + )).delete() + if len(slice_endpoint_uuids) > 0: + num_deletes += session.query(SliceEndPointModel)\ + .filter_by(and_( + SliceEndPointModel.slice_uuid == slice_uuid, + SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids) + )).delete() return num_deletes > 0 updated = run_transaction(sessionmaker(bind=db_engine), callback) -- GitLab From 2beec0043eff17492472bf90712a396a6f049a1c Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 08:25:46 +0000 Subject: [PATCH 097/158] Load Generator component: - defined extra-long hold time to have time to validate configurations --- src/load_generator/service/LoadGeneratorServiceServicerImpl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py index 1fa653394..d5adb492d 100644 --- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py +++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py @@ -38,7 +38,7 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): RequestType.SLICE_L3NM, ], offered_load = 50, - holding_time = 10, + holding_time = 3600, dry_mode = False, # in dry mode, no request is sent to TeraFlowSDN record_to_dlt = False, # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False -- GitLab From ceda32b03634191ff88c667591b7b751bdb8ce0e Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 08:41:43 +0000 Subject: [PATCH 098/158] Load Generator component: - added feature for deactivating teardowns - added feature to support infinite-loop request generation --- src/load_generator/load_gen/Parameters.py | 6 +++++- src/load_generator/load_gen/RequestScheduler.py | 7 +++++-- .../service/LoadGeneratorServiceServicerImpl.py | 5 +++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/load_generator/load_gen/Parameters.py b/src/load_generator/load_gen/Parameters.py index c74d18248..abe297039 100644 --- a/src/load_generator/load_gen/Parameters.py +++ b/src/load_generator/load_gen/Parameters.py @@ -17,7 +17,7 @@ from typing import List, Optional class Parameters: def __init__( self, num_requests : int, request_types : List[str], offered_load : Optional[float] = None, - inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None, + inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None, do_teardown : bool = True, dry_mode : bool = False, record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None ) -> None: self._num_requests = num_requests @@ -25,6 +25,7 @@ class Parameters: self._offered_load = offered_load self._inter_arrival_time = inter_arrival_time self._holding_time = holding_time + self._do_teardown = do_teardown self._dry_mode = dry_mode self._record_to_dlt = record_to_dlt self._dlt_domain_id = dlt_domain_id @@ -58,6 +59,9 @@ class Parameters: @property def holding_time(self): return self._holding_time + @property + def do_teardown(self): return self._do_teardown + @property def dry_mode(self): return self._dry_mode diff --git a/src/load_generator/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py index 408e0125f..e2a804d7f 100644 --- a/src/load_generator/load_gen/RequestScheduler.py +++ b/src/load_generator/load_gen/RequestScheduler.py @@ -48,7 +48,9 @@ class RequestScheduler: self._generator = generator def _schedule_request_setup(self) -> None: - if self._generator.num_requests_generated >= self._parameters.num_requests: + infinite_loop = self._parameters.num_requests == 0 + num_requests_generated = self._generator.num_requests_generated - 1 # because it first increases, then checks + if not infinite_loop and (num_requests_generated >= self._parameters.num_requests): LOGGER.info('Generation Done!') #self._scheduler.shutdown() return @@ -98,7 +100,8 @@ class RequestScheduler: slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid) self._create_update(slice_=request) - self._schedule_request_teardown(request) + if self._parameters.do_teardown: + self._schedule_request_teardown(request) def _request_teardown(self, request : Dict) -> None: if 'service_id' in request: diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py index d5adb492d..4957625bc 100644 --- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py +++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py @@ -28,7 +28,7 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): def __init__(self): LOGGER.debug('Creating Servicer...') self._parameters = Parameters( - num_requests = 100, + num_requests = 1, request_types = [ RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, @@ -38,7 +38,8 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): RequestType.SLICE_L3NM, ], offered_load = 50, - holding_time = 3600, + holding_time = 10, + do_teardown = False, dry_mode = False, # in dry mode, no request is sent to TeraFlowSDN record_to_dlt = False, # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False -- GitLab From 9df866a836d9ce254b63cb71cebf98987622930c Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 08:55:56 +0000 Subject: [PATCH 099/158] PathComp component: - activated DEBUG log level --- manifests/pathcompservice.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 71c927b56..49c3a8684 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10020"] -- GitLab From 34374c8214808881850b74dc75c90ed8b9693921 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 08:56:21 +0000 Subject: [PATCH 100/158] Common Context Query tools: - reduced log level of InterDomain-related methods to DEBUG --- .../tools/context_queries/InterDomain.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py index ab804145d..f2d9aa26d 100644 --- a/src/common/tools/context_queries/InterDomain.py +++ b/src/common/tools/context_queries/InterDomain.py @@ -34,11 +34,11 @@ DATACENTER_DEVICE_TYPES = {DeviceTypeEnum.DATACENTER, DeviceTypeEnum.EMULATED_DA def get_local_device_uuids(context_client : ContextClient) -> Set[str]: topologies = context_client.ListTopologies(ADMIN_CONTEXT_ID) topologies = {topology.topology_id.topology_uuid.uuid : topology for topology in topologies.topologies} - LOGGER.info('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys()))) + LOGGER.debug('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys()))) local_topology_uuids = set(topologies.keys()) local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME) - LOGGER.info('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids))) + LOGGER.debug('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids))) local_device_uuids = set() @@ -52,11 +52,11 @@ def get_local_device_uuids(context_client : ContextClient) -> Set[str]: for local_topology_uuid in local_topology_uuids: topology_device_ids = topologies[local_topology_uuid].device_ids topology_device_uuids = {device_id.device_uuid.uuid for device_id in topology_device_ids} - LOGGER.info('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format( + LOGGER.debug('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format( str(local_topology_uuid), str(topology_device_uuids))) local_device_uuids.update(topology_device_uuids) - LOGGER.info('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids))) + LOGGER.debug('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids))) return local_device_uuids def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]: @@ -71,7 +71,7 @@ def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]: # add abstracted devices in the interdomain topology interdomain_device_ids = interdomain_topology.device_ids interdomain_device_uuids = {device_id.device_uuid.uuid for device_id in interdomain_device_ids} - LOGGER.info('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) + LOGGER.debug('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) return interdomain_device_uuids def get_local_domain_devices(context_client : ContextClient) -> List[Device]: @@ -87,7 +87,7 @@ def get_local_domain_devices(context_client : ContextClient) -> List[Device]: def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: interdomain_device_uuids = get_interdomain_device_uuids(context_client) - LOGGER.info('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) + LOGGER.debug('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) non_interdomain_endpoint_ids = [ endpoint_id for endpoint_id in endpoint_ids @@ -97,14 +97,14 @@ def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPoint (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) for endpoint_id in non_interdomain_endpoint_ids ] - LOGGER.info('[is_inter_domain] non_interdomain_endpoint_ids={:s}'.format(str(str_non_interdomain_endpoint_ids))) + LOGGER.debug('[is_inter_domain] non_interdomain_endpoint_ids={:s}'.format(str(str_non_interdomain_endpoint_ids))) is_inter_domain_ = len(non_interdomain_endpoint_ids) == 0 - LOGGER.info('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_))) + LOGGER.debug('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_))) return is_inter_domain_ def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: local_device_uuids = get_local_device_uuids(context_client) - LOGGER.info('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids))) + LOGGER.debug('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids))) remote_endpoint_ids = [ endpoint_id for endpoint_id in endpoint_ids @@ -114,9 +114,9 @@ def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPoint (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) for endpoint_id in remote_endpoint_ids ] - LOGGER.info('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) + LOGGER.debug('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) is_multi_domain_ = len(remote_endpoint_ids) > 0 - LOGGER.info('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) + LOGGER.debug('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) return is_multi_domain_ def compute_interdomain_path( @@ -144,9 +144,9 @@ def compute_interdomain_path( constraint_lat.custom.constraint_type = 'latency[ms]' constraint_lat.custom.constraint_value = '100.0' - LOGGER.info('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req))) + LOGGER.debug('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req))) pathcomp_rep = pathcomp_client.Compute(pathcomp_req) - LOGGER.info('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) + LOGGER.debug('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) service = next(iter([ service @@ -222,7 +222,7 @@ def compute_traversed_domains( ) -> List[Tuple[str, bool, List[EndPointId]]]: local_device_uuids = get_local_device_uuids(context_client) - LOGGER.info('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) + LOGGER.debug('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) interdomain_devices = { @@ -231,7 +231,7 @@ def compute_traversed_domains( } devices_to_domains = get_device_to_domain_map(context_client) - LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains))) + LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains))) traversed_domains : List[Tuple[str, bool, List[EndPointId]]] = list() domains_dict : Dict[str, Tuple[str, bool, List[EndPointId]]] = dict() @@ -252,5 +252,5 @@ def compute_traversed_domains( ]) for domain_uuid,is_local_domain,endpoint_ids in traversed_domains ] - LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(str_traversed_domains))) + LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(str_traversed_domains))) return traversed_domains -- GitLab From eab52c6b5063def9780b3e2ff41d496825f5bd56 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 09:35:44 +0000 Subject: [PATCH 101/158] PathComp component: - forced context/topology uuids in backend request for simplicity; to be elaborated - improved formatting of request to be sent to backend in logs --- .../frontend/service/algorithms/_Algorithm.py | 2 +- .../service/algorithms/tools/ComposeRequest.py | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index a24ef7693..5c49a1fec 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -93,7 +93,7 @@ class _Algorithm: def execute(self, dump_request_filename : Optional[str] = None, dump_reply_filename : Optional[str] = None) -> None: request = {'serviceList': self.service_list, 'deviceList': self.device_list, 'linkList': self.link_list} - self.logger.debug('[execute] request={:s}'.format(str(request))) + self.logger.debug('[execute] request={:s}'.format(json.dumps(request, sort_keys=True, indent=4))) if dump_request_filename is not None: with open(dump_request_filename, 'w', encoding='UTF-8') as f: f.write(json.dumps(request, sort_keys=True, indent=4)) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py index 0a424bf8b..8412632ed 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py @@ -24,18 +24,24 @@ LOGGER = logging.getLogger(__name__) LOGGER = logging.getLogger(__name__) -def compose_topology_id(topology_id : TopologyId) -> Dict: - context_uuid = topology_id.context_id.context_uuid.uuid - topology_uuid = topology_id.topology_uuid.uuid +def compose_topology_id(topology_id : TopologyId) -> Dict: # pylint: disable=unused-argument + # force context_uuid and topology_uuid to be always DEFAULT_CONTEXT_NAME and DEFAULT_TOPOLOGY_NAME for simplicity + # for interdomain, contexts and topologies are managed in particular ways - if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_NAME - if len(topology_uuid) == 0: topology_uuid = DEFAULT_TOPOLOGY_NAME + context_uuid = DEFAULT_CONTEXT_NAME + #context_uuid = topology_id.context_id.context_uuid.uuid + #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_NAME + + topology_uuid = DEFAULT_TOPOLOGY_NAME + #topology_uuid = topology_id.topology_uuid.uuid + #if len(topology_uuid) == 0: topology_uuid = DEFAULT_TOPOLOGY_NAME return {'contextId': context_uuid, 'topology_uuid': topology_uuid} def compose_service_id(service_id : ServiceId) -> Dict: # force context_uuid to be always DEFAULT_CONTEXT_NAME for simplicity - # for interdomain contexts are managed in a particular way + # for interdomain, contexts are managed in particular ways + #context_uuid = service_id.context_id.context_uuid.uuid #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_NAME context_uuid = DEFAULT_CONTEXT_NAME -- GitLab From 5a90b501e38d589e4cf668310d21c13f32e458ab Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 09:57:46 +0000 Subject: [PATCH 102/158] PathComp component: - deactivated DEBUG log level --- manifests/pathcompservice.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 49c3a8684..71c927b56 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10020"] -- GitLab From 6be7bd98577d4a0b2991d4614013533deae24f51 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 09:58:05 +0000 Subject: [PATCH 103/158] PathComp component: - minor code cleanup --- .../frontend/service/algorithms/tools/ComposeRequest.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py index 8412632ed..0832615a1 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py @@ -22,8 +22,6 @@ from .ConstantsMappings import ( LOGGER = logging.getLogger(__name__) -LOGGER = logging.getLogger(__name__) - def compose_topology_id(topology_id : TopologyId) -> Dict: # pylint: disable=unused-argument # force context_uuid and topology_uuid to be always DEFAULT_CONTEXT_NAME and DEFAULT_TOPOLOGY_NAME for simplicity # for interdomain, contexts and topologies are managed in particular ways -- GitLab From d8a8c635041fb3e68b2ef71535aefae1548d35cb Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Mon, 23 Jan 2023 09:58:33 +0000 Subject: [PATCH 104/158] WebUI component: - added human-readable names to Topology graph --- src/webui/service/main/routes.py | 6 ++++-- src/webui/service/templates/js/topology.js | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 02706c858..30ed52911 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -136,7 +136,7 @@ def topology(): if device.device_id.device_uuid.uuid not in topo_device_uuids: continue devices.append({ 'id': device.device_id.device_uuid.uuid, - 'name': device.device_id.device_uuid.uuid, + 'name': device.name, 'type': device.device_type, }) @@ -150,13 +150,15 @@ def topology(): continue links.append({ 'id': link.link_id.link_uuid.uuid, + 'name': link.name, 'source': link.link_endpoint_ids[0].device_id.device_uuid.uuid, 'target': link.link_endpoint_ids[1].device_id.device_uuid.uuid, }) return jsonify({'devices': devices, 'links': links}) - except: + except: # pylint: disable=bare-except LOGGER.exception('Error retrieving topology') + return jsonify({'devices': [], 'links': []}) finally: context_client.close() diff --git a/src/webui/service/templates/js/topology.js b/src/webui/service/templates/js/topology.js index 29156224d..adcabf62c 100644 --- a/src/webui/service/templates/js/topology.js +++ b/src/webui/service/templates/js/topology.js @@ -88,9 +88,9 @@ d3.json("{{ url_for('main.topology') }}", function(data) { .call(d3.drag().on("start", dragstarted).on("drag", dragged).on("end", dragended)); // node tooltip - node.append("title").text(function(d) { return d.id; }); + node.append("title").text(function(n) { return n.name + ' (' + n.id + ')'; }); // link tooltip - link.append("title").text(function(d) { return d.id; }); + link.append("title").text(function(l) { return l.name + ' (' + l.id + ')'; }); // link style link -- GitLab From febe0556f289be96fccea3c09cf2a88efd84b91a Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 12:26:37 +0000 Subject: [PATCH 105/158] Common - Context Queries: - added get_device helper method --- src/common/tools/context_queries/Device.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/common/tools/context_queries/Device.py b/src/common/tools/context_queries/Device.py index e5b205d46..ed8772cf6 100644 --- a/src/common/tools/context_queries/Device.py +++ b/src/common/tools/context_queries/Device.py @@ -12,11 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Set -from common.proto.context_pb2 import ContextId, Device, Empty, Topology, TopologyId +import grpc, logging +from typing import List, Optional, Set +from common.proto.context_pb2 import ContextId, Device, DeviceId, Empty, Topology, TopologyId from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient +LOGGER = logging.getLogger(__name__) + +def get_device(context_client : ContextClient, device_uuid : str, rw_copy : bool = False) -> Optional[Device]: + try: + # pylint: disable=no-member + device_id = DeviceId() + device_id.device_uuid.uuid = device_uuid + ro_device = context_client.GetDevice(device_id) + if not rw_copy: return ro_device + rw_device = Device() + rw_device.CopyFrom(ro_device) + return rw_device + except grpc.RpcError: + #LOGGER.exception('Unable to get Device({:s})'.format(str(device_uuid))) + return None + def get_existing_device_uuids(context_client : ContextClient) -> Set[str]: existing_device_ids = context_client.ListDeviceIds(Empty()) existing_device_uuids = {device_id.device_uuid.uuid for device_id in existing_device_ids.device_ids} -- GitLab From ce5a8ab0881da92ab168cf8f12bae899123a2025 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 12:31:25 +0000 Subject: [PATCH 106/158] Device component: - removed internal in-memory database - added storage of connect-related config rules in context and added driver pre-loading when Device component starts - re-organized code of EmulatedDriver - re-organized code to improve clarity - minor code bug resolutions - code cleanup --- src/device/service/DeviceService.py | 10 +- .../service/DeviceServiceServicerImpl.py | 466 ++++-------------- src/device/service/MonitoringLoops.py | 153 ------ src/device/service/Tools.py | 286 +++++++++++ src/device/service/__main__.py | 5 +- src/device/service/database/ConfigModel.py | 122 ----- src/device/service/database/ContextModel.py | 38 -- src/device/service/database/DatabaseTools.py | 127 ----- src/device/service/database/DeviceModel.py | 106 ---- src/device/service/database/EndPointModel.py | 79 --- src/device/service/database/KpiModel.py | 59 --- src/device/service/database/KpiSampleType.py | 28 -- src/device/service/database/Tools.py | 72 --- src/device/service/database/TopologyModel.py | 39 -- .../service/driver_api/DriverInstanceCache.py | 47 +- src/device/service/driver_api/FilterFields.py | 12 +- src/device/service/driver_api/Tools.py | 71 --- .../emulated/Constants.py} | 18 +- .../drivers/emulated/EmulatedDriver.py | 106 +--- .../emulated/SyntheticSamplingParameters.py | 86 ++++ src/device/service/drivers/emulated/Tools.py | 46 ++ .../drivers/openconfig/templates/EndPoints.py | 10 +- .../service/monitoring/MonitoringLoop.py | 43 ++ .../service/monitoring/MonitoringLoops.py | 170 +++++++ .../{database => monitoring}/__init__.py | 2 - 25 files changed, 798 insertions(+), 1403 deletions(-) delete mode 100644 src/device/service/MonitoringLoops.py create mode 100644 src/device/service/Tools.py delete mode 100644 src/device/service/database/ConfigModel.py delete mode 100644 src/device/service/database/ContextModel.py delete mode 100644 src/device/service/database/DatabaseTools.py delete mode 100644 src/device/service/database/DeviceModel.py delete mode 100644 src/device/service/database/EndPointModel.py delete mode 100644 src/device/service/database/KpiModel.py delete mode 100644 src/device/service/database/KpiSampleType.py delete mode 100644 src/device/service/database/Tools.py delete mode 100644 src/device/service/database/TopologyModel.py delete mode 100644 src/device/service/driver_api/Tools.py rename src/device/service/{database/RelationModels.py => drivers/emulated/Constants.py} (55%) create mode 100644 src/device/service/drivers/emulated/SyntheticSamplingParameters.py create mode 100644 src/device/service/drivers/emulated/Tools.py create mode 100644 src/device/service/monitoring/MonitoringLoop.py create mode 100644 src/device/service/monitoring/MonitoringLoops.py rename src/device/service/{database => monitoring}/__init__.py (75%) diff --git a/src/device/service/DeviceService.py b/src/device/service/DeviceService.py index 59134f26d..ca165a200 100644 --- a/src/device/service/DeviceService.py +++ b/src/device/service/DeviceService.py @@ -14,14 +14,11 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc -from common.orm.backend.BackendEnum import BackendEnum -from common.orm.Database import Database -from common.orm.Factory import get_database_backend from common.proto.device_pb2_grpc import add_DeviceServiceServicer_to_server from common.tools.service.GenericGrpcService import GenericGrpcService from .driver_api.DriverInstanceCache import DriverInstanceCache from .DeviceServiceServicerImpl import DeviceServiceServicerImpl -from .MonitoringLoops import MonitoringLoops +from .monitoring.MonitoringLoops import MonitoringLoops # Custom gRPC settings # Multiple clients might keep connections alive waiting for RPC methods to be executed. @@ -32,9 +29,8 @@ class DeviceService(GenericGrpcService): def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.DEVICE) super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name) - database = Database(get_database_backend(backend=BackendEnum.INMEMORY)) - self.monitoring_loops = MonitoringLoops(database) - self.device_servicer = DeviceServiceServicerImpl(database, driver_instance_cache, self.monitoring_loops) + self.monitoring_loops = MonitoringLoops() + self.device_servicer = DeviceServiceServicerImpl(driver_instance_cache, self.monitoring_loops) def install_servicers(self): self.monitoring_loops.start() diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 88f49de6f..9d0f9bd3e 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -12,47 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -import grpc, json, logging, re -from typing import Any, Dict, List, Tuple +import grpc, logging from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method -from common.method_wrappers.ServiceExceptions import InvalidArgumentException, OperationFailedException -from common.orm.Database import Database -from common.orm.HighLevel import get_object, update_or_create_object -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig, DeviceId, Empty +from common.method_wrappers.ServiceExceptions import NotFoundException, OperationFailedException +from common.proto.context_pb2 import Device, DeviceConfig, DeviceId, Empty from common.proto.device_pb2 import MonitoringSettings from common.proto.device_pb2_grpc import DeviceServiceServicer -from common.proto.kpi_sample_types_pb2 import KpiSampleType -from common.tools.grpc.Tools import grpc_message_to_json +from common.tools.context_queries.Device import get_device from common.tools.mutex_queues.MutexQueues import MutexQueues from context.client.ContextClient import ContextClient -from .database.ConfigModel import ( - ConfigModel, ConfigRuleModel, ORM_ConfigActionEnum, get_config_rules, grpc_config_rules_to_raw, update_config) -from .database.DatabaseTools import ( - delete_device_from_context, get_device_driver_filter_fields, sync_device_from_context, sync_device_to_context, - update_device_in_local_database) -from .database.DeviceModel import DeviceModel, DriverModel -from .database.EndPointModel import EndPointModel, EndPointMonitorModel -from .database.KpiModel import KpiModel -from .database.KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type -from .database.RelationModels import EndPointMonitorKpiModel -from .driver_api._Driver import _Driver, RESOURCE_ENDPOINTS #, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES -from .driver_api.DriverInstanceCache import DriverInstanceCache -from .driver_api.Tools import ( - check_delete_errors, check_set_errors, check_subscribe_errors, check_unsubscribe_errors) -from .MonitoringLoops import MonitoringLoops +from .driver_api._Driver import _Driver +from .driver_api.DriverInstanceCache import DriverInstanceCache, get_driver +from .monitoring.MonitoringLoops import MonitoringLoops +from .Tools import ( + check_connect_rules, check_no_endpoints, compute_rules_to_add_delete, configure_rules, deconfigure_rules, + populate_config_rules, populate_endpoints, populate_initial_config_rules, subscribe_kpi, unsubscribe_kpi) LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('Device', 'RPC') +ERROR_MISSING_DRIVER = 'Device({:s}) has not been added to this Device instance' + class DeviceServiceServicerImpl(DeviceServiceServicer): - def __init__( - self, database : Database, driver_instance_cache : DriverInstanceCache, monitoring_loops : MonitoringLoops - ) -> None: + def __init__(self, driver_instance_cache : DriverInstanceCache, monitoring_loops : MonitoringLoops) -> None: LOGGER.debug('Creating Servicer...') - self.context_client = ContextClient() - self.database = database self.driver_instance_cache = driver_instance_cache self.monitoring_loops = monitoring_loops self.mutex_queues = MutexQueues() @@ -63,114 +47,36 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): device_id = request.device_id device_uuid = device_id.device_uuid.uuid - connection_config_rules = {} - unexpected_config_rules = [] - for config_rule in request.device_config.config_rules: - if (config_rule.action == ConfigActionEnum.CONFIGACTION_SET) and \ - (config_rule.WhichOneof('config_rule') == 'custom') and \ - (config_rule.custom.resource_key.startswith('_connect/')): - connection_config_rules[ - config_rule.custom.resource_key.replace('_connect/', '') - ] = config_rule.custom.resource_value - else: - unexpected_config_rules.append(config_rule) - if len(unexpected_config_rules) > 0: - unexpected_config_rules = grpc_message_to_json(request.device_config) - unexpected_config_rules = unexpected_config_rules['config_rules'] - unexpected_config_rules = list(filter( - lambda cr: cr.get('custom', {})['resource_key'].replace('_connect/', '') not in connection_config_rules, - unexpected_config_rules)) - str_unexpected_config_rules = json.dumps(unexpected_config_rules, sort_keys=True) - raise InvalidArgumentException( - 'device.device_config.config_rules', str_unexpected_config_rules, - extra_details='RPC method AddDevice only accepts connection Config Rules that should start '\ - 'with "_connect/" tag. Others should be configured after adding the device.') - - if len(request.device_endpoints) > 0: - unexpected_endpoints = [] - for device_endpoint in request.device_endpoints: - unexpected_endpoints.append(grpc_message_to_json(device_endpoint)) - str_unexpected_endpoints = json.dumps(unexpected_endpoints, sort_keys=True) - raise InvalidArgumentException( - 'device.device_endpoints', str_unexpected_endpoints, - extra_details='RPC method AddDevice does not accept Endpoints. Endpoints are discovered through '\ - 'interrogation of the physical device.') - - # Remove device configuration - json_request = grpc_message_to_json(request, use_integers_for_enums=True) - json_request['device_config'] = {} - request = Device(**json_request) + connection_config_rules = check_connect_rules(request.device_config) + check_no_endpoints(request.device_endpoints) + + context_client = ContextClient() + device = get_device(context_client, device_uuid, rw_copy=True) + if device is None: + # not in context, create from request + device = Device() + device.CopyFrom(request) self.mutex_queues.wait_my_turn(device_uuid) try: - sync_device_from_context(device_uuid, self.context_client, self.database) - db_device,_ = update_device_in_local_database(self.database, request) - - driver_filter_fields = get_device_driver_filter_fields(db_device) - - #LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules))) - address = connection_config_rules.pop('address', None) - port = connection_config_rules.pop('port', None) - settings = connection_config_rules.pop('settings', '{}') - try: - settings = json.loads(settings) - except ValueError as e: - raise InvalidArgumentException( - 'device.device_config.config_rules[settings]', settings, - extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e - driver : _Driver = self.driver_instance_cache.get( - device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings) - driver.Connect() - - endpoints = driver.GetConfig([RESOURCE_ENDPOINTS]) - try: - for resource_key, resource_value in endpoints: - if isinstance(resource_value, Exception): - LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value))) - continue - endpoint_uuid = resource_value.get('uuid') - endpoint_type = resource_value.get('type') - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) - db_endpoint, _ = update_or_create_object( - self.database, EndPointModel, str_endpoint_key, { - 'device_fk' : db_device, - 'endpoint_uuid': endpoint_uuid, - 'endpoint_type': endpoint_type, - 'resource_key' : resource_key, - }) - sample_types : Dict[int, str] = resource_value.get('sample_types', {}) - for sample_type, monitor_resource_key in sample_types.items(): - str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) - update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, { - 'endpoint_fk' : db_endpoint, - 'resource_key' : monitor_resource_key, - 'kpi_sample_type': grpc_to_enum__kpi_sample_type(sample_type), - }) - except: # pylint: disable=bare-except - LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints))) - - raw_running_config_rules = driver.GetConfig() - running_config_rules = [] - for resource_key, resource_value in raw_running_config_rules: - if isinstance(resource_value, Exception): - msg = 'Error retrieving config rules: {:s} => {:s}' - LOGGER.error(msg.format(str(resource_key), str(resource_value))) - continue - config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True)) - running_config_rules.append(config_rule) - - #for running_config_rule in running_config_rules: - # LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule))) - update_config(self.database, device_uuid, 'running', running_config_rules) - - initial_config_rules = driver.GetInitialConfig() - update_config(self.database, device_uuid, 'initial', initial_config_rules) - - #LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump( - # include_config_rules=True, include_drivers=True, include_endpoints=True)))) - - sync_device_to_context(db_device, self.context_client) - return DeviceId(**db_device.dump_id()) + driver : _Driver = get_driver(self.driver_instance_cache, device) + + errors = [] + + if len(device.device_endpoints) == 0: + # created from request, populate endpoints using driver + errors.extend(populate_endpoints(device, driver, self.monitoring_loops)) + + if len(device.device_config.config_rules) == len(connection_config_rules): + # created from request, populate config rules using driver + errors.extend(populate_config_rules(device, driver)) + + if len(errors) > 0: + for error in errors: LOGGER.error(error) + raise OperationFailedException('AddDevice', extra_details=errors) + + device_id = context_client.SetDevice(device) + return device_id finally: self.mutex_queues.signal_done(device_uuid) @@ -181,107 +87,52 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): self.mutex_queues.wait_my_turn(device_uuid) try: - sync_device_from_context(device_uuid, self.context_client, self.database) - - context_config_rules = get_config_rules(self.database, device_uuid, 'running') - context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules} - #LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules))) - - db_device,_ = update_device_in_local_database(self.database, request) - - request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules) - #LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules))) - - resources_to_set : List[Tuple[str, Any]] = [] # key, value - resources_to_delete : List[Tuple[str, Any]] = [] # key, value - - for config_rule in request_config_rules: - action, key, value = config_rule - if action == ORM_ConfigActionEnum.SET: - if (key not in context_config_rules) or (context_config_rules[key] != value): - resources_to_set.append((key, value)) - elif action == ORM_ConfigActionEnum.DELETE: - if key in context_config_rules: - resources_to_delete.append((key, value)) - - #LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set))) - #LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete))) - - # TODO: use of datastores (might be virtual ones) to enable rollbacks - - errors = [] + context_client = ContextClient() + device = get_device(context_client, device_uuid, rw_copy=True) + if device is None: + raise NotFoundException('Device', device_uuid, extra_details='loading in ConfigureDevice') driver : _Driver = self.driver_instance_cache.get(device_uuid) if driver is None: - errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid))) + msg = ERROR_MISSING_DRIVER.format(str(device_uuid)) + raise OperationFailedException('ConfigureDevice', extra_details=msg) - if len(errors) == 0: - results_setconfig = driver.SetConfig(resources_to_set) - errors.extend(check_set_errors(resources_to_set, results_setconfig)) + # TODO: use of datastores (might be virtual ones) to enable rollbacks + resources_to_set, resources_to_delete = compute_rules_to_add_delete(device, request) - if len(errors) == 0: - results_deleteconfig = driver.DeleteConfig(resources_to_delete) - errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig)) + errors = [] + errors.extend(configure_rules(device, driver, resources_to_set)) + errors.extend(deconfigure_rules(device, driver, resources_to_delete)) if len(errors) > 0: + for error in errors: LOGGER.error(error) raise OperationFailedException('ConfigureDevice', extra_details=errors) - running_config_rules = driver.GetConfig() - running_config_rules = [ - (ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True)) - for config_rule in running_config_rules if not isinstance(config_rule[1], Exception) - ] - #for running_config_rule in running_config_rules: - # LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule))) - update_config(self.database, device_uuid, 'running', running_config_rules) - - sync_device_to_context(db_device, self.context_client) - return DeviceId(**db_device.dump_id()) + # Rules updated by configure_rules() and deconfigure_rules() methods. + # Code to be removed soon if not needed. + #running_config_rules = driver.GetConfig() + #for config_rule in running_config_rules: + # if isinstance(config_rule[1], Exception): continue + # config_rule = device.device_config.config_rules.add() + # config_rule.action = ConfigActionEnum.CONFIGACTION_SET + # config_rule.custom.resource_key = config_rule[0] + # config_rule.custom.resource_value = json.dumps(config_rule[1], sort_keys=True) + + device_id = context_client.SetDevice(device) + return device_id finally: self.mutex_queues.signal_done(device_uuid) - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DeleteDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: device_uuid = request.device_uuid.uuid self.mutex_queues.wait_my_turn(device_uuid) try: - self.monitoring_loops.remove(device_uuid) - - sync_device_from_context(device_uuid, self.context_client, self.database) - db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) - if db_device is None: return Empty() - + context_client = ContextClient() + self.monitoring_loops.remove_device(device_uuid) self.driver_instance_cache.delete(device_uuid) - delete_device_from_context(db_device, self.context_client) - - for db_kpi_pk,_ in db_device.references(KpiModel): - db_kpi = get_object(self.database, KpiModel, db_kpi_pk) - for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel): - get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete() - db_kpi.delete() - - for db_endpoint_pk,_ in db_device.references(EndPointModel): - db_endpoint = EndPointModel(self.database, db_endpoint_pk) - for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel): - get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete() - db_endpoint.delete() - - for db_driver_pk,_ in db_device.references(DriverModel): - get_object(self.database, DriverModel, db_driver_pk).delete() - - db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk) - for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel): - get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete() - - db_running_config = ConfigModel(self.database, db_device.device_running_config_fk) - for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel): - get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete() - - db_device.delete() - db_initial_config.delete() - db_running_config.delete() + context_client.RemoveDevice(request) return Empty() finally: self.mutex_queues.signal_done(device_uuid) @@ -292,177 +143,38 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): self.mutex_queues.wait_my_turn(device_uuid) try: - sync_device_from_context(device_uuid, self.context_client, self.database) - db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) + driver : _Driver = self.driver_instance_cache.get(device_uuid) + if driver is None: + msg = ERROR_MISSING_DRIVER.format(str(device_uuid)) + raise OperationFailedException('GetInitialConfig', extra_details=msg) + + device_config = DeviceConfig() + errors = populate_initial_config_rules(device_uuid, device_config, driver) + + if len(errors) > 0: + for error in errors: LOGGER.error(error) + raise OperationFailedException('GetInitialConfig', extra_details=errors) - config_rules = {} if db_device is None else db_device.dump_initial_config() - device_config = DeviceConfig(config_rules=config_rules) return device_config finally: self.mutex_queues.signal_done(device_uuid) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def MonitorDeviceKpi(self, request : MonitoringSettings, context : grpc.ServicerContext) -> Empty: - kpi_uuid = request.kpi_id.kpi_id.uuid device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid + subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0) + manage_kpi_method = subscribe_kpi if subscribe else unsubscribe_kpi + self.mutex_queues.wait_my_turn(device_uuid) try: - subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0) - if subscribe: - db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False) - if db_device is None: - msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - endpoint_id = request.kpi_descriptor.endpoint_id - endpoint_uuid = endpoint_id.endpoint_uuid.uuid - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) - endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - db_endpoint : EndPointModel = get_object( - self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False) - if db_endpoint is None: - msg = 'Device({:s})/EndPoint({:s}) not found. EndPointKey({:s})'.format( - str(device_uuid), str(endpoint_uuid), str(str_endpoint_key)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - driver : _Driver = self.driver_instance_cache.get(device_uuid) - if driver is None: - msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - sample_type = request.kpi_descriptor.kpi_sample_type - - attributes = { - 'kpi_uuid' : request.kpi_id.kpi_id.uuid, - 'kpi_description' : request.kpi_descriptor.kpi_description, - 'kpi_sample_type' : grpc_to_enum__kpi_sample_type(sample_type), - 'device_fk' : db_device, - 'endpoint_fk' : db_endpoint, - 'sampling_duration': request.sampling_duration_s, - 'sampling_interval': request.sampling_interval_s, - } - result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes) - db_kpi, updated = result - - str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) - db_endpoint_monitor : EndPointMonitorModel = get_object( - self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False) - if db_endpoint_monitor is None: - msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format( - str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')), - str(device_uuid), str(endpoint_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key) - str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':') - attributes = { - 'endpoint_monitor_fk': db_endpoint_monitor, - 'kpi_fk' : db_kpi, - } - result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object( - self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes) - db_endpoint_monitor_kpi, updated = result - - resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval - resources_to_subscribe.append( - (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval)) - results_subscribestate = driver.SubscribeState(resources_to_subscribe) - errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate) - if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors) - - self.monitoring_loops.add(device_uuid, driver) - - else: - db_kpi : KpiModel = get_object( - self.database, KpiModel, kpi_uuid, raise_if_not_found=False) - if db_kpi is None: - msg = 'Kpi({:s}) not found'.format(str(kpi_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - db_device : DeviceModel = get_object( - self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False) - if db_device is None: - msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - device_uuid = db_device.device_uuid - - db_endpoint : EndPointModel = get_object( - self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False) - if db_endpoint is None: - msg = 'EndPoint({:s}) not found'.format(str(db_kpi.endpoint_fk)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - endpoint_uuid = db_endpoint.endpoint_uuid - str_endpoint_key = db_endpoint.pk - - kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type - sample_type = kpi_sample_type.value - str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)]) - db_endpoint_monitor : EndPointMonitorModel = get_object( - self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False) - if db_endpoint_monitor is None: - msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key) - str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':') - db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object( - self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False) - if db_endpoint_monitor_kpi is None: - msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval - resources_to_unsubscribe.append( - (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval)) - - driver : _Driver = self.driver_instance_cache.get(device_uuid) - if driver is None: - msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid)) - raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) - - results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe) - errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate) - if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors) - - db_endpoint_monitor_kpi.delete() - db_kpi.delete() - - # There is one monitoring loop per device; keep them active since they are re-used by different monitoring - # requests. - #self.monitoring_loops.remove(device_uuid) - - # Subscriptions are not stored as classical driver config. - # TODO: consider adding it somehow in the configuration. - # Warning: GetConfig might be very slow in OpenConfig devices - #running_config_rules = [ - # (config_rule[0], json.dumps(config_rule[1], sort_keys=True)) - # for config_rule in driver.GetConfig() - #] - #context_config_rules = { - # config_rule[1]: config_rule[2] - # for config_rule in get_config_rules(self.database, device_uuid, 'running') - #} - - ## each in context, not in running => delete in context - ## each in running, not in context => add to context - ## each in context and in running, context.value != running.value => update in context - #running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = [] - #for config_rule_key,config_rule_value in running_config_rules: - # running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value)) - # context_config_rules.pop(config_rule_key, None) - #for context_rule_key,context_rule_value in context_config_rules.items(): - # running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value)) - - ##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}' - ##for i,running_config_rules_action in enumerate(running_config_rules_actions): - ## LOGGER.info(msg.format(i, str(running_config_rules_action))) - #update_config(self.database, device_uuid, 'running', running_config_rules_actions) - - sync_device_to_context(db_device, self.context_client) + driver : _Driver = self.driver_instance_cache.get(device_uuid) + if driver is None: + msg = ERROR_MISSING_DRIVER.format(str(device_uuid)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + + errors = manage_kpi_method(request, driver, self.monitoring_loops) + if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors) + return Empty() finally: self.mutex_queues.signal_done(device_uuid) diff --git a/src/device/service/MonitoringLoops.py b/src/device/service/MonitoringLoops.py deleted file mode 100644 index 18faed0d5..000000000 --- a/src/device/service/MonitoringLoops.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, queue, re, threading -from datetime import datetime -from typing import Dict -from common.orm.Database import Database -from common.orm.HighLevel import get_object -from common.orm.backend.Tools import key_to_str -from common.proto.monitoring_pb2 import Kpi -from monitoring.client.MonitoringClient import MonitoringClient -from .database.KpiModel import KpiModel -from .database.RelationModels import EndPointMonitorKpiModel -from .driver_api._Driver import _Driver - -LOGGER = logging.getLogger(__name__) -QUEUE_GET_WAIT_TIMEOUT = 0.5 - -class MonitoringLoop: - def __init__(self, device_uuid : str, driver : _Driver, samples_queue : queue.Queue) -> None: - self._device_uuid = device_uuid - self._driver = driver - self._samples_queue = samples_queue - self._running = threading.Event() - self._terminate = threading.Event() - self._samples_stream = self._driver.GetState(blocking=True, terminate=self._terminate) - self._collector_thread = threading.Thread(target=self._collect, daemon=True) - - def _collect(self) -> None: - for sample in self._samples_stream: - if self._terminate.is_set(): break - sample = (self._device_uuid, *sample) - self._samples_queue.put_nowait(sample) - - def start(self): - self._collector_thread.start() - self._running.set() - - @property - def is_running(self): return self._running.is_set() - - def stop(self): - self._terminate.set() - self._collector_thread.join() - -class MonitoringLoops: - def __init__(self, database : Database) -> None: - self._monitoring_client = MonitoringClient() - self._database = database - self._samples_queue = queue.Queue() - self._running = threading.Event() - self._terminate = threading.Event() - self._lock = threading.Lock() - self._device_uuid__to__monitoring_loop : Dict[str, MonitoringLoop] = {} - self._exporter_thread = threading.Thread(target=self._export, daemon=True) - - def add(self, device_uuid : str, driver : _Driver) -> None: - with self._lock: - monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid) - if (monitoring_loop is not None) and monitoring_loop.is_running: return - monitoring_loop = MonitoringLoop(device_uuid, driver, self._samples_queue) - self._device_uuid__to__monitoring_loop[device_uuid] = monitoring_loop - monitoring_loop.start() - - def remove(self, device_uuid : str) -> None: - with self._lock: - monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid) - if monitoring_loop is None: return - if monitoring_loop.is_running: monitoring_loop.stop() - self._device_uuid__to__monitoring_loop.pop(device_uuid, None) - - def start(self): - self._exporter_thread.start() - - @property - def is_running(self): return self._running.is_set() - - def stop(self): - self._terminate.set() - self._exporter_thread.join() - - def _export(self) -> None: - if self._database is None: - LOGGER.error('[MonitoringLoops:_export] Database not set. Terminating Exporter.') - return - - self._running.set() - while not self._terminate.is_set(): - try: - sample = self._samples_queue.get(block=True, timeout=QUEUE_GET_WAIT_TIMEOUT) - #LOGGER.debug('[MonitoringLoops:_export] sample={:s}'.format(str(sample))) - except queue.Empty: - continue - - device_uuid, timestamp, endpoint_monitor_resource_key, value = sample - endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', endpoint_monitor_resource_key) - str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':') - - #db_entries = self._database.dump() - #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - #for db_entry in db_entries: - # LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - #LOGGER.info('-----------------------------------------------------------') - - db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object( - self._database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False) - if db_endpoint_monitor_kpi is None: - LOGGER.warning('EndPointMonitorKpi({:s}) not found'.format(str_endpoint_monitor_kpi_key)) - continue - - str_kpi_key = db_endpoint_monitor_kpi.kpi_fk - db_kpi : KpiModel = get_object( - self._database, KpiModel, str_kpi_key, raise_if_not_found=False) - if db_kpi is None: - LOGGER.warning('Kpi({:s}) not found'.format(str_kpi_key)) - continue - - # FIXME: uint32 used for intVal results in out of range issues. Temporarily changed to float - # extend the 'kpi_value' to support long integers (uint64 / int64 / ...) - if isinstance(value, int): - kpi_value_field_name = 'int64Val' - kpi_value_field_cast = int - elif isinstance(value, float): - kpi_value_field_name = 'floatVal' - kpi_value_field_cast = float - elif isinstance(value, bool): - kpi_value_field_name = 'boolVal' - kpi_value_field_cast = bool - else: - kpi_value_field_name = 'stringVal' - kpi_value_field_cast = str - - try: - self._monitoring_client.IncludeKpi(Kpi(**{ - 'kpi_id' : {'kpi_id': {'uuid': db_kpi.kpi_uuid}}, - 'timestamp': {'timestamp': timestamp}, - 'kpi_value': {kpi_value_field_name: kpi_value_field_cast(value)} - })) - except: # pylint: disable=bare-except - LOGGER.exception('Unable to format/send Kpi') - - self._running.clear() diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py new file mode 100644 index 000000000..086e5a071 --- /dev/null +++ b/src/device/service/Tools.py @@ -0,0 +1,286 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from typing import Any, Dict, List, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from common.method_wrappers.ServiceExceptions import InvalidArgumentException +from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig +from common.proto.device_pb2 import MonitoringSettings +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.tools.grpc.Tools import grpc_message_to_json +from .driver_api._Driver import _Driver, RESOURCE_ENDPOINTS +from .monitoring.MonitoringLoops import MonitoringLoops + +ERROR_ENDPOINT = 'Device({:s}): GetConfig retrieved malformed Endpoint({:s})' +ERROR_GET = 'Device({:s}): Unable to Get resource(key={:s}); error({:s})' +ERROR_GET_INIT = 'Device({:s}): Unable to Get Initial resource(key={:s}); error({:s})' +ERROR_SET = 'Device({:s}): Unable to Set resource(key={:s}, value={:s}); error({:s})' +ERROR_DELETE = 'Device({:s}): Unable to Delete resource(key={:s}, value={:s}); error({:s})' +ERROR_SAMPLETYPE = 'Device({:s})/EndPoint({:s}): SampleType({:s}/{:s}) not supported' +ERROR_SUBSCRIBE = 'Device({:s}): Unable to Subscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\ + 'error({:s})' +ERROR_MISSING_KPI = 'Device({:s}): Kpi({:s}) not found' +ERROR_UNSUBSCRIBE = 'Device({:s}): Unable to Unsubscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\ + 'error({:s})' + +def check_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]: + connection_config_rules = dict() + unexpected_config_rules = list() + for config_rule in device_config.config_rules: + is_action_set = (config_rule.action == ConfigActionEnum.CONFIGACTION_SET) + is_custom_rule = (config_rule.WhichOneof('config_rule') == 'custom') + if is_action_set and is_custom_rule and (config_rule.custom.resource_key.startswith('_connect/')): + connect_attribute = config_rule.custom.resource_key.replace('_connect/', '') + connection_config_rules[connect_attribute] = config_rule.custom.resource_value + else: + unexpected_config_rules.append(config_rule) + + if len(unexpected_config_rules) > 0: + unexpected_config_rules = grpc_message_to_json(device_config) + unexpected_config_rules = unexpected_config_rules['config_rules'] + unexpected_config_rules = list(filter( + lambda cr: cr.get('custom', {})['resource_key'].replace('_connect/', '') not in connection_config_rules, + unexpected_config_rules)) + str_unexpected_config_rules = json.dumps(unexpected_config_rules, sort_keys=True) + raise InvalidArgumentException( + 'device.device_config.config_rules', str_unexpected_config_rules, + extra_details='RPC method AddDevice only accepts connection Config Rules that should start '\ + 'with "_connect/" tag. Others should be configured after adding the device.') + + return connection_config_rules + +def get_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]: + connect_rules = dict() + for config_rule in device_config.config_rules: + if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue + if config_rule.WhichOneof('config_rule') != 'custom': continue + if not config_rule.custom.resource_key.startswith('_connect/'): continue + connect_attribute = config_rule.custom.resource_key.replace('_connect/', '') + connect_rules[connect_attribute] = config_rule.custom.resource_value + return connect_rules + +def check_no_endpoints(device_endpoints) -> None: + if len(device_endpoints) == 0: return + unexpected_endpoints = [] + for device_endpoint in device_endpoints: + unexpected_endpoints.append(grpc_message_to_json(device_endpoint)) + str_unexpected_endpoints = json.dumps(unexpected_endpoints, sort_keys=True) + raise InvalidArgumentException( + 'device.device_endpoints', str_unexpected_endpoints, + extra_details='RPC method AddDevice does not accept Endpoints. Endpoints are discovered through '\ + 'interrogation of the physical device.') + +def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]: + device_uuid = device.device_id.device_uuid.uuid + + resources_to_get = [RESOURCE_ENDPOINTS] + results_getconfig = driver.GetConfig(resources_to_get) + + errors : List[str] = list() + for endpoint in results_getconfig: + if len(endpoint) != 2: + errors.append(ERROR_ENDPOINT.format(device_uuid, str(endpoint))) + continue + + resource_key, resource_value = endpoint + if isinstance(resource_value, Exception): + errors.append(ERROR_GET.format(device_uuid, str(resource_key), str(resource_value))) + continue + + endpoint_uuid = resource_value.get('uuid') + + device_endpoint = device.device_endpoints.add() + device_endpoint.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + device_endpoint.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME + device_endpoint.endpoint_id.device_id.device_uuid.uuid = device_uuid + device_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid + device_endpoint.endpoint_type = resource_value.get('type') + + sample_types : Dict[int, str] = resource_value.get('sample_types', {}) + for kpi_sample_type, monitor_resource_key in sample_types.items(): + device_endpoint.kpi_sample_types.append(kpi_sample_type) + monitoring_loops.add_resource_key(device_uuid, endpoint_uuid, kpi_sample_type, monitor_resource_key) + + return errors + +def populate_config_rules(device : Device, driver : _Driver) -> List[str]: + device_uuid = device.device_id.device_uuid.uuid + + resources_to_get = ['ALL'] + results_getconfig = driver.GetConfig() + + errors : List[str] = list() + for resource_key, resource_value in zip(resources_to_get, results_getconfig): + if isinstance(resource_value, Exception): + errors.append(ERROR_GET.format(device_uuid, str(resource_key), str(resource_value))) + continue + + config_rule = device.device_config.config_rules.add() + config_rule.action = ConfigActionEnum.CONFIGACTION_SET + config_rule.custom.resource_key = resource_key + config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) + + return errors + +def populate_initial_config_rules(device_uuid : str, device_config : DeviceConfig, driver : _Driver) -> List[str]: + results_getinitconfig = driver.GetInitialConfig() + + errors : List[str] = list() + for resource_key, resource_value in results_getinitconfig: + if isinstance(resource_value, Exception): + errors.append(ERROR_GET_INIT.format(device_uuid, str(resource_key), str(resource_value))) + continue + + config_rule = device_config.config_rules.add() + config_rule.action = ConfigActionEnum.CONFIGACTION_SET + config_rule.custom.resource_key = resource_key + config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) + + return errors + +def compute_rules_to_add_delete( + device : Device, request : Device +) -> Tuple[List[Tuple[str, Any]], List[Tuple[str, Any]]]: + # convert config rules from context into a dictionary + # TODO: add support for non-custom config rules + context_config_rules = { + config_rule.custom.resource_key: config_rule.custom.resource_value + for config_rule in device.device_config.config_rules + if config_rule.WhichOneof('config_rule') == 'custom' + } + + # convert config rules from request into a list + # TODO: add support for non-custom config rules + request_config_rules = [ + (config_rule.action, config_rule.custom.resource_key, config_rule.custom.resource_value) + for config_rule in request.device_config.config_rules + if config_rule.WhichOneof('config_rule') == 'custom' + ] + + resources_to_set : List[Tuple[str, Any]] = [] # key, value + resources_to_delete : List[Tuple[str, Any]] = [] # key, value + + for action, key, value in request_config_rules: + if action == ConfigActionEnum.CONFIGACTION_SET: + if (key in context_config_rules) and (context_config_rules[key][0] == value): continue + resources_to_set.append((key, value)) + elif action == ConfigActionEnum.CONFIGACTION_DELETE: + if key not in context_config_rules: continue + resources_to_delete.append((key, value)) + + return resources_to_set, resources_to_delete + +def configure_rules(device : Device, driver : _Driver, resources_to_set : List[Tuple[str, Any]]) -> List[str]: + device_uuid = device.device_id.device_uuid.uuid + + results_setconfig = driver.SetConfig(resources_to_set) + + errors : List[str] = list() + for (resource_key, resource_value), result in zip(resources_to_set, results_setconfig): + if isinstance(result, Exception): + errors.append(ERROR_SET.format(device_uuid, str(resource_key), str(resource_value), str(result))) + continue + # add to config of device + config_rule = device.device_config.config_rules.add() + config_rule.action = ConfigActionEnum.CONFIGACTION_SET + config_rule.custom.resource_key = resource_key + config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) + + return errors + +def deconfigure_rules(device : Device, driver : _Driver, resources_to_delete : List[Tuple[str, Any]]) -> List[str]: + device_uuid = device.device_id.device_uuid.uuid + + results_deleteconfig = driver.DeleteConfig(resources_to_delete) + + errors : List[str] = list() + for (resource_key, resource_value), result in zip(resources_to_delete, results_deleteconfig): + if isinstance(result, Exception): + errors.append(ERROR_DELETE.format(device_uuid, str(resource_key), str(resource_value), str(result))) + continue + # remove from config of device + config_rule = device.device_config.config_rules.add() + config_rule.action = ConfigActionEnum.CONFIGACTION_SET + config_rule.custom.resource_key = resource_key + config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) + + return errors + +def subscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]: + kpi_uuid = request.kpi_id.kpi_id.uuid + device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid + endpoint_uuid = request.kpi_descriptor.endpoint_id.endpoint_uuid.uuid + kpi_sample_type = request.kpi_descriptor.kpi_sample_type + + resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_uuid, kpi_sample_type) + if resource_key is None: + kpi_sample_type_name = KpiSampleType.Name(kpi_sample_type).upper().replace('KPISAMPLETYPE_', '') + return [ + ERROR_SAMPLETYPE.format( + str(device_uuid), str(endpoint_uuid), str(kpi_sample_type), str(kpi_sample_type_name) + ) + ] + + sampling_duration = request.sampling_duration_s # seconds + sampling_interval = request.sampling_interval_s # seconds + + resources_to_subscribe = [(resource_key, sampling_duration, sampling_interval)] + results_subscribestate = driver.SubscribeState(resources_to_subscribe) + + errors : List[str] = list() + for (resource_key, duration, interval), result in zip(resources_to_subscribe, results_subscribestate): + if isinstance(result, Exception): + errors.append(ERROR_SUBSCRIBE.format( + str(device_uuid), str(resource_key), str(duration), str(interval), str(result) + )) + continue + + monitoring_loops.add_kpi(device_uuid, resource_key, kpi_uuid, sampling_duration, sampling_interval) + monitoring_loops.add_device(device_uuid, driver) + + return errors + +def unsubscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]: + kpi_uuid = request.kpi_id.kpi_id.uuid + device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid + #endpoint_uuid = request.kpi_descriptor.endpoint_id.endpoint_uuid.uuid + #kpi_sample_type = request.kpi_descriptor.kpi_sample_type + + # TODO: consider if further validation needs to be done (correct endpoint_uuid?, correct kpi_sample_type?) + #resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_uuid, kpi_sample_type) + #if resource_key is None: + # kpi_sample_type_name = KpiSampleType.Name(kpi_sample_type).upper().replace('KPISAMPLETYPE_', '') + # return [ERROR_SAMPLETYPE.format(device_uuid, endpoint_uuid, str(kpi_sample_type), str(kpi_sample_type_name))] + + kpi_details = monitoring_loops.get_kpi_by_uuid(kpi_uuid) + if kpi_details is None: + return [ERROR_MISSING_KPI.format(str(device_uuid), str(kpi_uuid))] + + device_uuid, resource_key, sampling_duration, sampling_interval = kpi_details + + resources_to_unsubscribe = [(resource_key, sampling_duration, sampling_interval)] + results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe) + + errors : List[str] = list() + for (resource_key, duration, interval), result in zip(resources_to_unsubscribe, results_unsubscribestate): + if isinstance(result, Exception): + errors.append(ERROR_UNSUBSCRIBE.format( + device_uuid, str(resource_key), str(duration), str(interval), str(result))) + continue + + monitoring_loops.remove_kpi(kpi_uuid) + #monitoring_loops.remove_device(device_uuid) # Do not remove; one monitoring_loop/device used by multiple requests + + return errors diff --git a/src/device/service/__main__.py b/src/device/service/__main__.py index 5c9b41531..c69393fc3 100644 --- a/src/device/service/__main__.py +++ b/src/device/service/__main__.py @@ -20,7 +20,7 @@ from common.Settings import ( wait_for_environment_variables) from .DeviceService import DeviceService from .driver_api.DriverFactory import DriverFactory -from .driver_api.DriverInstanceCache import DriverInstanceCache +from .driver_api.DriverInstanceCache import DriverInstanceCache, preload_drivers from .drivers import DRIVERS terminate = threading.Event() @@ -58,6 +58,9 @@ def main(): driver_factory = DriverFactory(DRIVERS) driver_instance_cache = DriverInstanceCache(driver_factory) + # Initialize drivers with existing devices in context + preload_drivers(driver_instance_cache) + # Starting device service grpc_service = DeviceService(driver_instance_cache) grpc_service.start() diff --git a/src/device/service/database/ConfigModel.py b/src/device/service/database/ConfigModel.py deleted file mode 100644 index 8472a44ea..000000000 --- a/src/device/service/database/ConfigModel.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools, logging, operator -from enum import Enum -from typing import Dict, List, Tuple, Union -from common.orm.Database import Database -from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object -from common.orm.backend.Tools import key_to_str -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.IntegerField import IntegerField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from common.proto.context_pb2 import ConfigActionEnum -from common.tools.grpc.Tools import grpc_message_to_json_string -from .Tools import fast_hasher, grpc_to_enum, remove_dict_key - -LOGGER = logging.getLogger(__name__) - -class ORM_ConfigActionEnum(Enum): - UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED - SET = ConfigActionEnum.CONFIGACTION_SET - DELETE = ConfigActionEnum.CONFIGACTION_DELETE - -grpc_to_enum__config_action = functools.partial( - grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum) - -class ConfigModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - - def dump(self) -> List[Dict]: - db_config_rule_pks = self.references(ConfigRuleModel) - config_rules = [ConfigRuleModel(self.database, pk).dump(include_position=True) for pk,_ in db_config_rule_pks] - config_rules = sorted(config_rules, key=operator.itemgetter('position')) - return [remove_dict_key(config_rule, 'position') for config_rule in config_rules] - -class ConfigRuleModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - config_fk = ForeignKeyField(ConfigModel) - position = IntegerField(min_value=0, required=True) - action = EnumeratedField(ORM_ConfigActionEnum, required=True) - key = StringField(required=True, allow_empty=False) - value = StringField(required=False, allow_empty=True) - - def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ - result = { - 'action': self.action.value, - 'custom': { - 'resource_key': self.key, - 'resource_value': self.value, - }, - } - if include_position: result['position'] = self.position - return result - -def delete_all_config_rules(database : Database, db_parent_pk : str, config_name : str) -> None: - str_config_key = key_to_str([db_parent_pk, config_name], separator=':') - db_config : ConfigModel = get_object(database, ConfigModel, str_config_key, raise_if_not_found=False) - if db_config is None: return - db_config_rule_pks = db_config.references(ConfigRuleModel) - for pk,_ in db_config_rule_pks: ConfigRuleModel(database, pk).delete() - -def grpc_config_rules_to_raw(grpc_config_rules) -> List[Tuple[ORM_ConfigActionEnum, str, str]]: - def translate(grpc_config_rule): - action = grpc_to_enum__config_action(grpc_config_rule.action) - config_rule_type = str(grpc_config_rule.WhichOneof('config_rule')) - if config_rule_type != 'custom': - raise NotImplementedError('ConfigRule of type {:s} is not implemented: {:s}'.format( - config_rule_type, grpc_message_to_json_string(grpc_config_rule))) - return action, grpc_config_rule.custom.resource_key, grpc_config_rule.custom.resource_value - return [translate(grpc_config_rule) for grpc_config_rule in grpc_config_rules] - -def get_config_rules( - database : Database, db_parent_pk : str, config_name : str - ) -> List[Tuple[ORM_ConfigActionEnum, str, str]]: - - str_config_key = key_to_str([db_parent_pk, config_name], separator=':') - db_config = get_object(database, ConfigModel, str_config_key, raise_if_not_found=False) - return [] if db_config is None else [ - # pylint: disable=no-member, protected-access - (ORM_ConfigActionEnum._value2member_map_.get(config_rule['action']), - config_rule['custom']['resource_key'], config_rule['custom']['resource_value']) - for config_rule in db_config.dump() - if 'custom' in config_rule - ] - -def update_config( - database : Database, db_parent_pk : str, config_name : str, - raw_config_rules : List[Tuple[ORM_ConfigActionEnum, str, str]] -) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]: - - str_config_key = key_to_str([db_parent_pk, config_name], separator=':') - result : Tuple[ConfigModel, bool] = get_or_create_object(database, ConfigModel, str_config_key) - db_config, created = result - - db_objects : List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)] - - for position,(action, resource_key, resource_value) in enumerate(raw_config_rules): - str_rule_key_hash = fast_hasher(resource_key) - str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':') - result : Tuple[ConfigRuleModel, bool] = update_or_create_object( - database, ConfigRuleModel, str_config_rule_key, { - 'config_fk': db_config, 'position': position, 'action': action, 'key': resource_key, - 'value': resource_value, - }) - db_config_rule, updated = result - db_objects.append((db_config_rule, updated)) - - return db_objects diff --git a/src/device/service/database/ContextModel.py b/src/device/service/database/ContextModel.py deleted file mode 100644 index a609e1ba9..000000000 --- a/src/device/service/database/ContextModel.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import Dict, List -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model - -LOGGER = logging.getLogger(__name__) - -class ContextModel(Model): - pk = PrimaryKeyField() - context_uuid = StringField(required=True, allow_empty=False) - - def dump_id(self) -> Dict: - return {'context_uuid': {'uuid': self.context_uuid}} - - def dump_topology_ids(self) -> List[Dict]: - from .TopologyModel import TopologyModel # pylint: disable=import-outside-toplevel - db_topology_pks = self.references(TopologyModel) - return [TopologyModel(self.database, pk).dump_id() for pk,_ in db_topology_pks] - - def dump(self, include_topologies=False) -> Dict: # pylint: disable=arguments-differ - result = {'context_id': self.dump_id()} - if include_topologies: result['topology_ids'] = self.dump_topology_ids() - return result diff --git a/src/device/service/database/DatabaseTools.py b/src/device/service/database/DatabaseTools.py deleted file mode 100644 index 9d3b712ca..000000000 --- a/src/device/service/database/DatabaseTools.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import grpc -from typing import Any, Dict, Tuple -from common.method_wrappers.ServiceExceptions import InvalidArgumentException -from common.orm.Database import Database -from common.orm.HighLevel import get_or_create_object, update_or_create_object -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import Device, DeviceId -from context.client.ContextClient import ContextClient -from device.service.driver_api.FilterFields import FilterFieldEnum -from .ConfigModel import delete_all_config_rules, grpc_config_rules_to_raw, update_config -from .ContextModel import ContextModel -from .DeviceModel import DeviceModel, DriverModel, grpc_to_enum__device_operational_status, set_drivers -from .EndPointModel import EndPointModel, set_endpoint_monitors -from .TopologyModel import TopologyModel - -def update_device_in_local_database(database : Database, device : Device) -> Tuple[DeviceModel, bool]: - device_uuid = device.device_id.device_uuid.uuid - - for i,endpoint in enumerate(device.device_endpoints): - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid - if device_uuid != endpoint_device_uuid: - raise InvalidArgumentException( - 'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid, - ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)]) - - initial_config_result = update_config(database, device_uuid, 'initial', []) - - config_rules = grpc_config_rules_to_raw(device.device_config.config_rules) - delete_all_config_rules(database, device_uuid, 'running') - running_config_result = update_config(database, device_uuid, 'running', config_rules) - - result : Tuple[DeviceModel, bool] = update_or_create_object(database, DeviceModel, device_uuid, { - 'device_uuid' : device_uuid, - 'device_type' : device.device_type, - 'device_operational_status': grpc_to_enum__device_operational_status(device.device_operational_status), - 'device_initial_config_fk' : initial_config_result[0][0], - 'device_running_config_fk' : running_config_result[0][0], - }) - db_device, updated = result - set_drivers(database, db_device, device.device_drivers) - - for i,endpoint in enumerate(device.device_endpoints): - endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid - endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid - if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid - - str_endpoint_key = key_to_str([device_uuid, endpoint_uuid]) - endpoint_attributes = { - 'device_fk' : db_device, - 'endpoint_uuid': endpoint_uuid, - 'endpoint_type': endpoint.endpoint_type, - } - - endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid - endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid - if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0: - result : Tuple[ContextModel, bool] = get_or_create_object( - database, ContextModel, endpoint_topology_context_uuid, defaults={ - 'context_uuid': endpoint_topology_context_uuid, - }) - db_context, _ = result - - str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid]) - result : Tuple[TopologyModel, bool] = get_or_create_object( - database, TopologyModel, str_topology_key, defaults={ - 'context_fk': db_context, - 'topology_uuid': endpoint_topology_uuid, - }) - db_topology, _ = result - - str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':') - endpoint_attributes['topology_fk'] = db_topology - - result : Tuple[EndPointModel, bool] = update_or_create_object( - database, EndPointModel, str_endpoint_key, endpoint_attributes) - db_endpoint, db_endpoint_updated = result - - set_endpoint_monitors(database, db_endpoint, endpoint.kpi_sample_types) - - updated = updated or db_endpoint_updated - - return db_device, updated - -def sync_device_from_context( - device_uuid : str, context_client : ContextClient, database : Database - ) -> Tuple[DeviceModel, bool]: - - try: - device : Device = context_client.GetDevice(DeviceId(device_uuid={'uuid': device_uuid})) - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member - return None - return update_device_in_local_database(database, device) - -def sync_device_to_context(db_device : DeviceModel, context_client : ContextClient) -> None: - if db_device is None: return - context_client.SetDevice(Device(**db_device.dump( - include_config_rules=True, include_drivers=True, include_endpoints=True))) - -def delete_device_from_context(db_device : DeviceModel, context_client : ContextClient) -> None: - if db_device is None: return - context_client.RemoveDevice(DeviceId(**db_device.dump_id())) - -def get_device_driver_filter_fields(db_device : DeviceModel) -> Dict[FilterFieldEnum, Any]: - if db_device is None: return {} - database = db_device.database - db_driver_pks = db_device.references(DriverModel) - db_driver_names = [DriverModel(database, pk).driver.value for pk,_ in db_driver_pks] - return { - FilterFieldEnum.DEVICE_TYPE: db_device.device_type, - FilterFieldEnum.DRIVER : db_driver_names, - } diff --git a/src/device/service/database/DeviceModel.py b/src/device/service/database/DeviceModel.py deleted file mode 100644 index 9dd63d36e..000000000 --- a/src/device/service/database/DeviceModel.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools, logging -from enum import Enum -from typing import Dict, List -from common.orm.Database import Database -from common.orm.backend.Tools import key_to_str -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum -from .ConfigModel import ConfigModel -from .Tools import grpc_to_enum - -LOGGER = logging.getLogger(__name__) - -class ORM_DeviceDriverEnum(Enum): - UNDEFINED = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED - OPENCONFIG = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG - TRANSPORT_API = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API - P4 = DeviceDriverEnum.DEVICEDRIVER_P4 - IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY - ONF_TR_352 = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352 - XR = DeviceDriverEnum.DEVICEDRIVER_XR - -grpc_to_enum__device_driver = functools.partial( - grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) - -class ORM_DeviceOperationalStatusEnum(Enum): - UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED - DISABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED - ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - -grpc_to_enum__device_operational_status = functools.partial( - grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum) - -class DeviceModel(Model): - pk = PrimaryKeyField() - device_uuid = StringField(required=True, allow_empty=False) - device_type = StringField() - device_initial_config_fk = ForeignKeyField(ConfigModel) - device_running_config_fk = ForeignKeyField(ConfigModel) - device_operational_status = EnumeratedField(ORM_DeviceOperationalStatusEnum, required=True) - - def dump_id(self) -> Dict: - return {'device_uuid': {'uuid': self.device_uuid}} - - def dump_initial_config(self) -> Dict: - return ConfigModel(self.database, self.device_initial_config_fk).dump() - - def dump_running_config(self) -> Dict: - return ConfigModel(self.database, self.device_running_config_fk).dump() - - def dump_drivers(self) -> List[int]: - db_driver_pks = self.references(DriverModel) - return [DriverModel(self.database, pk).dump() for pk,_ in db_driver_pks] - - def dump_endpoints(self) -> List[Dict]: - from .EndPointModel import EndPointModel # pylint: disable=import-outside-toplevel - db_endpoints_pks = self.references(EndPointModel) - return [EndPointModel(self.database, pk).dump() for pk,_ in db_endpoints_pks] - - def dump( # pylint: disable=arguments-differ - self, include_config_rules=True, include_drivers=True, include_endpoints=True - ) -> Dict: - result = { - 'device_id': self.dump_id(), - 'device_type': self.device_type, - 'device_operational_status': self.device_operational_status.value, - } - if include_config_rules: result.setdefault('device_config', {})['config_rules'] = self.dump_running_config() - if include_drivers: result['device_drivers'] = self.dump_drivers() - if include_endpoints: result['device_endpoints'] = self.dump_endpoints() - return result - -class DriverModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - device_fk = ForeignKeyField(DeviceModel) - driver = EnumeratedField(ORM_DeviceDriverEnum, required=True) - - def dump(self) -> Dict: - return self.driver.value - -def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers): - db_device_pk = db_device.pk - for driver in grpc_device_drivers: - orm_driver = grpc_to_enum__device_driver(driver) - str_device_driver_key = key_to_str([db_device_pk, orm_driver.name]) - db_device_driver = DriverModel(database, str_device_driver_key) - db_device_driver.device_fk = db_device - db_device_driver.driver = orm_driver - db_device_driver.save() diff --git a/src/device/service/database/EndPointModel.py b/src/device/service/database/EndPointModel.py deleted file mode 100644 index 3d4435737..000000000 --- a/src/device/service/database/EndPointModel.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import Dict, List -from common.orm.Database import Database -from common.orm.HighLevel import update_or_create_object -from common.orm.backend.Tools import key_to_str -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from .DeviceModel import DeviceModel -from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type -from .TopologyModel import TopologyModel - -LOGGER = logging.getLogger(__name__) - -class EndPointModel(Model): - pk = PrimaryKeyField() - topology_fk = ForeignKeyField(TopologyModel, required=False) - device_fk = ForeignKeyField(DeviceModel) - endpoint_uuid = StringField(required=True, allow_empty=False) - endpoint_type = StringField() - - def dump_id(self) -> Dict: - device_id = DeviceModel(self.database, self.device_fk).dump_id() - result = { - 'device_id': device_id, - 'endpoint_uuid': {'uuid': self.endpoint_uuid}, - } - if self.topology_fk is not None: - result['topology_id'] = TopologyModel(self.database, self.topology_fk).dump_id() - return result - - def dump_kpi_sample_types(self) -> List[int]: - db_kpi_sample_type_pks = self.references(EndPointMonitorModel) - return [EndPointMonitorModel(self.database, pk).dump() for pk,_ in db_kpi_sample_type_pks] - - def dump( # pylint: disable=arguments-differ - self, include_kpi_sample_types=True - ) -> Dict: - result = { - 'endpoint_id': self.dump_id(), - 'endpoint_type': self.endpoint_type, - } - if include_kpi_sample_types: result['kpi_sample_types'] = self.dump_kpi_sample_types() - return result - -class EndPointMonitorModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - endpoint_fk = ForeignKeyField(EndPointModel) - resource_key = StringField(required=True, allow_empty=True) - kpi_sample_type = EnumeratedField(ORM_KpiSampleTypeEnum, required=True) - - def dump(self) -> Dict: - return self.kpi_sample_type.value - -def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc_endpoint_kpi_sample_types): - db_endpoint_pk = db_endpoint.pk - for kpi_sample_type in grpc_endpoint_kpi_sample_types: - orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type) - str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)]) - update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, { - 'endpoint_fk' : db_endpoint, - 'kpi_sample_type': orm_kpi_sample_type, - }) diff --git a/src/device/service/database/KpiModel.py b/src/device/service/database/KpiModel.py deleted file mode 100644 index e3631d380..000000000 --- a/src/device/service/database/KpiModel.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import Dict -from common.orm.fields.EnumeratedField import EnumeratedField -from common.orm.fields.FloatField import FloatField -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from .DeviceModel import DeviceModel -from .EndPointModel import EndPointModel -from .KpiSampleType import ORM_KpiSampleTypeEnum - -LOGGER = logging.getLogger(__name__) - -class KpiModel(Model): - pk = PrimaryKeyField() - kpi_uuid = StringField(required=True, allow_empty=False) - kpi_description = StringField(required=False, allow_empty=True) - kpi_sample_type = EnumeratedField(ORM_KpiSampleTypeEnum, required=True) - device_fk = ForeignKeyField(DeviceModel) - endpoint_fk = ForeignKeyField(EndPointModel) - sampling_duration = FloatField(min_value=0, required=True) - sampling_interval = FloatField(min_value=0, required=True) - - def dump_id(self) -> Dict: - return {'kpi_id': {'uuid': self.kpi_uuid}} - - def dump_descriptor(self) -> Dict: - result = { - 'kpi_description': self.kpi_description, - 'kpi_sample_type': self.kpi_sample_type.value, - } - if self.device_fk is not None: - result['device_id'] = DeviceModel(self.database, self.device_fk).dump_id() - if self.endpoint_fk is not None: - result['endpoint_id'] = EndPointModel(self.database, self.endpoint_fk).dump_id() - return result - - def dump(self) -> Dict: - return { - 'kpi_id': self.dump_id(), - 'kpi_descriptor': self.dump_descriptor(), - 'sampling_duration_s': self.sampling_duration, - 'sampling_interval_s': self.sampling_interval, - } diff --git a/src/device/service/database/KpiSampleType.py b/src/device/service/database/KpiSampleType.py deleted file mode 100644 index 0a2015b3f..000000000 --- a/src/device/service/database/KpiSampleType.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -from enum import Enum -from common.proto.kpi_sample_types_pb2 import KpiSampleType -from .Tools import grpc_to_enum - -class ORM_KpiSampleTypeEnum(Enum): - UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN - PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED - PACKETS_RECEIVED = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED - BYTES_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED - BYTES_RECEIVED = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED - -grpc_to_enum__kpi_sample_type = functools.partial( - grpc_to_enum, KpiSampleType, ORM_KpiSampleTypeEnum) diff --git a/src/device/service/database/Tools.py b/src/device/service/database/Tools.py deleted file mode 100644 index 43bb71bd9..000000000 --- a/src/device/service/database/Tools.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import hashlib, re -from enum import Enum -from typing import Dict, List, Tuple, Union - -# Convenient helper function to remove dictionary items in dict/list/set comprehensions. - -def remove_dict_key(dictionary : Dict, key : str): - dictionary.pop(key, None) - return dictionary - -# Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatical method to retrieve -# the values it expects from strings containing the desired value symbol or its integer value, so a kind of mapping is -# required. Besides, ORM Models expect Enum classes in EnumeratedFields; we create specific and conveniently defined -# Enum classes to serve both purposes. - -def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value): - grpc_enum_name = grpc_enum_class.Name(grpc_enum_value) - grpc_enum_prefix = orm_enum_class.__name__.upper() - grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix) - grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix) - grpc_enum_prefix = grpc_enum_prefix + '_' - orm_enum_name = grpc_enum_name.replace(grpc_enum_prefix, '') - orm_enum_value = orm_enum_class._member_map_.get(orm_enum_name) # pylint: disable=protected-access - return orm_enum_value - -# For some models, it is convenient to produce a string hash for fast comparisons of existence or modification. Method -# fast_hasher computes configurable length (between 1 and 64 byte) hashes and retrieves them in hex representation. - -FASTHASHER_ITEM_ACCEPTED_FORMAT = 'Union[bytes, str]' -FASTHASHER_DATA_ACCEPTED_FORMAT = 'Union[{fmt:s}, List[{fmt:s}], Tuple[{fmt:s}]]'.format( - fmt=FASTHASHER_ITEM_ACCEPTED_FORMAT) - -def fast_hasher(data : Union[bytes, str, List[Union[bytes, str]], Tuple[Union[bytes, str]]], digest_size : int = 8): - hasher = hashlib.blake2b(digest_size=digest_size) - # Do not accept sets, dicts, or other unordered dats tructures since their order is arbitrary thus producing - # different hashes depending on the order. Consider adding support for sets or dicts with previous sorting of - # items by their key. - - if isinstance(data, bytes): - data = [data] - elif isinstance(data, str): - data = [data.encode('UTF-8')] - elif isinstance(data, (list, tuple)): - pass - else: - msg = 'data({:s}) must be {:s}, found {:s}' - raise TypeError(msg.format(str(data), FASTHASHER_DATA_ACCEPTED_FORMAT, str(type(data)))) - - for i,item in enumerate(data): - if isinstance(item, str): - item = item.encode('UTF-8') - elif isinstance(item, bytes): - pass - else: - msg = 'data[{:d}]({:s}) must be {:s}, found {:s}' - raise TypeError(msg.format(i, str(item), FASTHASHER_ITEM_ACCEPTED_FORMAT, str(type(item)))) - hasher.update(item) - return hasher.hexdigest() diff --git a/src/device/service/database/TopologyModel.py b/src/device/service/database/TopologyModel.py deleted file mode 100644 index f9e9c0b1a..000000000 --- a/src/device/service/database/TopologyModel.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import Dict -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.fields.StringField import StringField -from common.orm.model.Model import Model -from .ContextModel import ContextModel - -LOGGER = logging.getLogger(__name__) - -class TopologyModel(Model): - pk = PrimaryKeyField() - context_fk = ForeignKeyField(ContextModel) - topology_uuid = StringField(required=True, allow_empty=False) - - def dump_id(self) -> Dict: - context_id = ContextModel(self.database, self.context_fk).dump_id() - return { - 'context_id': context_id, - 'topology_uuid': {'uuid': self.topology_uuid}, - } - - def dump(self) -> Dict: - result = {'topology_id': self.dump_id()} - return result diff --git a/src/device/service/driver_api/DriverInstanceCache.py b/src/device/service/driver_api/DriverInstanceCache.py index 41cc66363..29fecf36f 100644 --- a/src/device/service/driver_api/DriverInstanceCache.py +++ b/src/device/service/driver_api/DriverInstanceCache.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, threading +import json, logging, threading from typing import Any, Dict, Optional +from common.method_wrappers.ServiceExceptions import InvalidArgumentException +from common.proto.context_pb2 import Device, Empty +from context.client.ContextClient import ContextClient +from device.service.Tools import get_connect_rules from ._Driver import _Driver from .DriverFactory import DriverFactory from .Exceptions import DriverInstanceCacheTerminatedException -from .FilterFields import FilterFieldEnum +from .FilterFields import FilterFieldEnum, get_device_driver_filter_fields LOGGER = logging.getLogger(__name__) @@ -30,7 +34,8 @@ class DriverInstanceCache: def get( self, device_uuid : str, filter_fields : Dict[FilterFieldEnum, Any] = {}, address : Optional[str] = None, - port : Optional[int] = None, settings : Dict[str, Any] = {}) -> _Driver: + port : Optional[int] = None, settings : Dict[str, Any] = {} + ) -> _Driver: if self._terminate.is_set(): raise DriverInstanceCacheTerminatedException() @@ -61,10 +66,44 @@ class DriverInstanceCache: self._terminate.set() with self._lock: while len(self._device_uuid__to__driver_instance) > 0: + device_uuid,device_driver = self._device_uuid__to__driver_instance.popitem() try: - device_uuid,device_driver = self._device_uuid__to__driver_instance.popitem() device_driver.Disconnect() except: # pylint: disable=bare-except msg = 'Error disconnecting Driver({:s}) from device. Will retry later...' LOGGER.exception(msg.format(device_uuid)) + # re-adding to retry disconnect self._device_uuid__to__driver_instance[device_uuid] = device_driver + +def get_driver(driver_instance_cache : DriverInstanceCache, device : Device) -> _Driver: + device_uuid = device.device_id.device_uuid.uuid + + driver : _Driver = driver_instance_cache.get(device_uuid) + if driver is not None: return driver + + driver_filter_fields = get_device_driver_filter_fields(device) + connect_rules = get_connect_rules(device.device_config) + + #LOGGER.info('[get_driver] connect_rules = {:s}'.format(str(connect_rules))) + address = connect_rules.get('address', '127.0.0.1') + port = connect_rules.get('port', '0') + settings = connect_rules.get('settings', '{}') + + try: + settings = json.loads(settings) + except ValueError as e: + raise InvalidArgumentException( + 'device.device_config.config_rules[settings]', settings, + extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.' + ) from e + + driver : _Driver = driver_instance_cache.get( + device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings) + driver.Connect() + + return driver + +def preload_drivers(driver_instance_cache : DriverInstanceCache) -> None: + context_client = ContextClient() + devices = context_client.ListDevices(Empty()) + for device in devices.devices: get_driver(driver_instance_cache, device) diff --git a/src/device/service/driver_api/FilterFields.py b/src/device/service/driver_api/FilterFields.py index 9ea544590..ba277e523 100644 --- a/src/device/service/driver_api/FilterFields.py +++ b/src/device/service/driver_api/FilterFields.py @@ -13,8 +13,9 @@ # limitations under the License. from enum import Enum +from typing import Any, Dict, Optional from common.DeviceTypes import DeviceTypeEnum -from device.service.database.DeviceModel import ORM_DeviceDriverEnum +from common.proto.context_pb2 import Device, DeviceDriverEnum class FilterFieldEnum(Enum): DEVICE_TYPE = 'device_type' @@ -26,8 +27,15 @@ class FilterFieldEnum(Enum): # Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified FILTER_FIELD_ALLOWED_VALUES = { FilterFieldEnum.DEVICE_TYPE.value : {i.value for i in DeviceTypeEnum}, - FilterFieldEnum.DRIVER.value : {i.value for i in ORM_DeviceDriverEnum}, + FilterFieldEnum.DRIVER.value : set(DeviceDriverEnum.values()), FilterFieldEnum.VENDOR.value : None, FilterFieldEnum.MODEL.value : None, FilterFieldEnum.SERIAL_NUMBER.value : None, } + +def get_device_driver_filter_fields(device : Optional[Device]) -> Dict[FilterFieldEnum, Any]: + if device is None: return {} + return { + FilterFieldEnum.DEVICE_TYPE: device.device_type, + FilterFieldEnum.DRIVER : [driver for driver in device.device_drivers], + } diff --git a/src/device/service/driver_api/Tools.py b/src/device/service/driver_api/Tools.py deleted file mode 100644 index 19c81d89b..000000000 --- a/src/device/service/driver_api/Tools.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import operator -from typing import Any, Callable, List, Tuple, Union - -ACTION_MSG_GET = 'Get resource(key={:s})' -ACTION_MSG_SET = 'Set resource(key={:s}, value={:s})' -ACTION_MSG_DELETE = 'Delete resource(key={:s}, value={:s})' -ACTION_MSG_SUBSCRIBE = 'Subscribe subscription(key={:s}, duration={:s}, interval={:s})' -ACTION_MSG_UNSUBSCRIBE = 'Unsubscribe subscription(key={:s}, duration={:s}, interval={:s})' - -def _get(resource_key : str): - return ACTION_MSG_GET.format(str(resource_key)) - -def _set(resource : Tuple[str, Any]): - return ACTION_MSG_SET.format(*tuple(map(str, resource))) - -def _delete(resource : Tuple[str, Any]): - return ACTION_MSG_SET.format(*tuple(map(str, resource))) - -def _subscribe(subscription : Tuple[str, float, float]): - return ACTION_MSG_SUBSCRIBE.format(*tuple(map(str, subscription))) - -def _unsubscribe(subscription : Tuple[str, float, float]): - return ACTION_MSG_UNSUBSCRIBE.format(*tuple(map(str, subscription))) - -def _check_errors( - error_func : Callable, parameters_list : List[Any], results_list : List[Union[bool, Exception]] - ) -> List[str]: - errors = [] - for parameters, results in zip(parameters_list, results_list): - if not isinstance(results, Exception): continue - errors.append('Unable to {:s}; error({:s})'.format(error_func(parameters), str(results))) - return errors - -def check_get_errors( - resource_keys : List[str], results : List[Tuple[str, Union[Any, None, Exception]]] - ) -> List[str]: - return _check_errors(_get, resource_keys, map(operator.itemgetter(1), results)) - -def check_set_errors( - resources : List[Tuple[str, Any]], results : List[Union[bool, Exception]] - ) -> List[str]: - return _check_errors(_set, resources, results) - -def check_delete_errors( - resources : List[Tuple[str, Any]], results : List[Union[bool, Exception]] - ) -> List[str]: - return _check_errors(_delete, resources, results) - -def check_subscribe_errors( - subscriptions : List[Tuple[str, float, float]], results : List[Union[bool, Exception]] - ) -> List[str]: - return _check_errors(_subscribe, subscriptions, results) - -def check_unsubscribe_errors( - subscriptions : List[Tuple[str, float, float]], results : List[Union[bool, Exception]] - ) -> List[str]: - return _check_errors(_unsubscribe, subscriptions, results) diff --git a/src/device/service/database/RelationModels.py b/src/device/service/drivers/emulated/Constants.py similarity index 55% rename from src/device/service/database/RelationModels.py rename to src/device/service/drivers/emulated/Constants.py index 0f6caa646..1c148c02b 100644 --- a/src/device/service/database/RelationModels.py +++ b/src/device/service/drivers/emulated/Constants.py @@ -12,16 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging -from common.orm.fields.ForeignKeyField import ForeignKeyField -from common.orm.fields.PrimaryKeyField import PrimaryKeyField -from common.orm.model.Model import Model -from .EndPointModel import EndPointMonitorModel -from .KpiModel import KpiModel +from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES -LOGGER = logging.getLogger(__name__) - -class EndPointMonitorKpiModel(Model): # pylint: disable=abstract-method - pk = PrimaryKeyField() - endpoint_monitor_fk = ForeignKeyField(EndPointMonitorModel) - kpi_fk = ForeignKeyField(KpiModel) +SPECIAL_RESOURCE_MAPPINGS = { + RESOURCE_ENDPOINTS : '/endpoints', + RESOURCE_INTERFACES : '/interfaces', + RESOURCE_NETWORK_INSTANCES: '/net-instances', +} diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py index 6029ff660..4f5effce0 100644 --- a/src/device/service/drivers/emulated/EmulatedDriver.py +++ b/src/device/service/drivers/emulated/EmulatedDriver.py @@ -12,117 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -import anytree, json, logging, math, pytz, queue, random, re, threading +import anytree, json, logging, pytz, queue, re, threading from datetime import datetime, timedelta -from typing import Any, Dict, Iterator, List, Optional, Tuple, Union +from typing import Any, Iterator, List, Optional, Tuple, Union from apscheduler.executors.pool import ThreadPoolExecutor from apscheduler.job import Job from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.schedulers.background import BackgroundScheduler from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF from common.type_checkers.Checkers import chk_float, chk_length, chk_string, chk_type -from device.service.database.KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type -from device.service.driver_api._Driver import ( - RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, - _Driver) +from device.service.driver_api._Driver import _Driver from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value +from .Constants import SPECIAL_RESOURCE_MAPPINGS +from .SyntheticSamplingParameters import SyntheticSamplingParameters, do_sampling +from .Tools import compose_resource_endpoint LOGGER = logging.getLogger(__name__) -SPECIAL_RESOURCE_MAPPINGS = { - RESOURCE_ENDPOINTS : '/endpoints', - RESOURCE_INTERFACES : '/interfaces', - RESOURCE_NETWORK_INSTANCES: '/net-instances', -} - -def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Tuple[str, Any]: - endpoint_uuid = endpoint_data.get('uuid') - if endpoint_uuid is None: return None - endpoint_resource_path = SPECIAL_RESOURCE_MAPPINGS.get(RESOURCE_ENDPOINTS) - endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid) - - endpoint_type = endpoint_data.get('type') - if endpoint_type is None: return None - - endpoint_sample_types = endpoint_data.get('sample_types') - if endpoint_sample_types is None: return None - sample_types = {} - for endpoint_sample_type in endpoint_sample_types: - try: - kpi_sample_type : ORM_KpiSampleTypeEnum = grpc_to_enum__kpi_sample_type(endpoint_sample_type) - except: # pylint: disable=bare-except - LOGGER.warning('Unknown EndpointSampleType({:s}) for Endpoint({:s}). Ignoring and continuing...'.format( - str(endpoint_sample_type), str(endpoint_data))) - continue - metric_name = kpi_sample_type.name.lower() - monitoring_resource_key = '{:s}/state/{:s}'.format(endpoint_resource_key, metric_name) - sample_types[endpoint_sample_type] = monitoring_resource_key - - endpoint_resource_value = {'uuid': endpoint_uuid, 'type': endpoint_type, 'sample_types': sample_types} - return endpoint_resource_key, endpoint_resource_value - -RE_GET_ENDPOINT_METRIC = re.compile(r'.*\/endpoint\[([^\]]+)\]\/state\/(.*)') RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'.*\/interface\[([^\]]+)\].*') -class SyntheticSamplingParameters: - def __init__(self) -> None: - self.__lock = threading.Lock() - self.__data = {} - self.__configured_endpoints = set() - - def set_endpoint_configured(self, endpoint_uuid : str): - with self.__lock: - self.__configured_endpoints.add(endpoint_uuid) - - def unset_endpoint_configured(self, endpoint_uuid : str): - with self.__lock: - self.__configured_endpoints.discard(endpoint_uuid) - - def get(self, resource_key : str) -> Tuple[float, float, float, float]: - with self.__lock: - match = RE_GET_ENDPOINT_METRIC.match(resource_key) - if match is None: - msg = '[SyntheticSamplingParameters:get] unable to extract endpoint-metric from resource_key "{:s}"' - LOGGER.error(msg.format(resource_key)) - return (0, 0, 1, 0, 0) - endpoint_uuid = match.group(1) - - # If endpoint is not configured, generate a flat synthetic traffic aligned at 0 - if endpoint_uuid not in self.__configured_endpoints: return (0, 0, 1, 0, 0) - - metric = match.group(2) - metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '') - - msg = '[SyntheticSamplingParameters:get] resource_key={:s}, endpoint_uuid={:s}, metric={:s}, metric_sense={:s}' - LOGGER.info(msg.format(resource_key, endpoint_uuid, metric, metric_sense)) - - parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense) - parameters = self.__data.get(parameters_key) - if parameters is not None: return parameters - - # assume packets - amplitude = 1.e7 * random.random() - phase = 60 * random.random() - period = 3600 * random.random() - offset = 1.e8 * random.random() + amplitude - avg_bytes_per_packet = random.randint(500, 1500) - parameters = (amplitude, phase, period, offset, avg_bytes_per_packet) - return self.__data.setdefault(parameters_key, parameters) - -def do_sampling( - synthetic_sampling_parameters : SyntheticSamplingParameters, resource_key : str, out_samples : queue.Queue - ): - amplitude, phase, period, offset, avg_bytes_per_packet = synthetic_sampling_parameters.get(resource_key) - if 'bytes' in resource_key.lower(): - # convert to bytes - amplitude = avg_bytes_per_packet * amplitude - offset = avg_bytes_per_packet * offset - timestamp = datetime.timestamp(datetime.utcnow()) - waveform = amplitude * math.sin(2 * math.pi * timestamp / period + phase) + offset - noise = amplitude * random.random() - value = abs(0.95 * waveform + 0.05 * noise) - out_samples.put_nowait((timestamp, resource_key, value)) - HISTOGRAM_BUCKETS = ( # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF 0.0001, 0.00025, 0.00050, 0.00075, @@ -240,7 +148,7 @@ class EmulatedDriver(_Driver): try: resource_value = json.loads(resource_value) - except: # pylint: disable=broad-except + except: # pylint: disable=bare-except pass set_subnode_value(resolver, self.__running, resource_path, resource_value) diff --git a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py new file mode 100644 index 000000000..65feb9d16 --- /dev/null +++ b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py @@ -0,0 +1,86 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, math, queue, random, re, threading +from datetime import datetime +from typing import Optional, Tuple + +LOGGER = logging.getLogger(__name__) + +RE_GET_ENDPOINT_METRIC = re.compile(r'.*\/endpoint\[([^\]]+)\]\/state\/(.*)') + +MSG_ERROR_PARSE = '[get] unable to extract endpoint-metric from monitoring_resource_key "{:s}"' +MSG_INFO = '[get] monitoring_resource_key={:s}, endpoint_uuid={:s}, metric={:s}, metric_sense={:s}' + +class SyntheticSamplingParameters: + def __init__(self) -> None: + self.__lock = threading.Lock() + self.__data = {} + self.__configured_endpoints = set() + + def set_endpoint_configured(self, endpoint_uuid : str): + with self.__lock: + self.__configured_endpoints.add(endpoint_uuid) + + def unset_endpoint_configured(self, endpoint_uuid : str): + with self.__lock: + self.__configured_endpoints.discard(endpoint_uuid) + + def get(self, monitoring_resource_key : str) -> Optional[Tuple[float, float, float, float, float]]: + with self.__lock: + match = RE_GET_ENDPOINT_METRIC.match(monitoring_resource_key) + if match is None: + LOGGER.error(MSG_ERROR_PARSE.format(monitoring_resource_key)) + return None + endpoint_uuid = match.group(1) + + # If endpoint is not configured, generate a flat synthetic traffic aligned at 0 + if endpoint_uuid not in self.__configured_endpoints: return (0, 0, 1, 0, 0) + + metric = match.group(2) + metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '') + + LOGGER.info(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense)) + + parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense) + parameters = self.__data.get(parameters_key) + if parameters is not None: return parameters + + # assume packets + amplitude = 1.e7 * random.random() + phase = 60 * random.random() + period = 3600 * random.random() + offset = 1.e8 * random.random() + amplitude + avg_bytes_per_packet = random.randint(500, 1500) + parameters = (amplitude, phase, period, offset, avg_bytes_per_packet) + return self.__data.setdefault(parameters_key, parameters) + +def do_sampling( + synthetic_sampling_parameters : SyntheticSamplingParameters, monitoring_resource_key : str, + out_samples : queue.Queue +) -> None: + parameters = synthetic_sampling_parameters.get(monitoring_resource_key) + if parameters is None: return + amplitude, phase, period, offset, avg_bytes_per_packet = parameters + + if 'bytes' in monitoring_resource_key.lower(): + # convert to bytes + amplitude = avg_bytes_per_packet * amplitude + offset = avg_bytes_per_packet * offset + + timestamp = datetime.timestamp(datetime.utcnow()) + waveform = amplitude * math.sin(2 * math.pi * timestamp / period + phase) + offset + noise = amplitude * random.random() + value = abs(0.95 * waveform + 0.05 * noise) + out_samples.put_nowait((timestamp, monitoring_resource_key, value)) diff --git a/src/device/service/drivers/emulated/Tools.py b/src/device/service/drivers/emulated/Tools.py new file mode 100644 index 000000000..14672c203 --- /dev/null +++ b/src/device/service/drivers/emulated/Tools.py @@ -0,0 +1,46 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Any, Dict, Tuple +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from device.service.driver_api._Driver import RESOURCE_ENDPOINTS +from .Constants import SPECIAL_RESOURCE_MAPPINGS + +LOGGER = logging.getLogger(__name__) + +def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Tuple[str, Any]: + endpoint_uuid = endpoint_data.get('uuid') + if endpoint_uuid is None: return None + endpoint_resource_path = SPECIAL_RESOURCE_MAPPINGS.get(RESOURCE_ENDPOINTS) + endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid) + + endpoint_type = endpoint_data.get('type') + if endpoint_type is None: return None + + endpoint_sample_types = endpoint_data.get('sample_types') + if endpoint_sample_types is None: return None + + sample_types = {} + for endpoint_sample_type in endpoint_sample_types: + try: + metric_name = KpiSampleType.Name(endpoint_sample_type).lower().replace('kpisampletype_', '') + except: # pylint: disable=bare-except + LOGGER.warning('Unsupported EndPointSampleType({:s})'.format(str(endpoint_sample_type))) + continue + monitoring_resource_key = '{:s}/state/{:s}'.format(endpoint_resource_key, metric_name) + sample_types[endpoint_sample_type] = monitoring_resource_key + + endpoint_resource_value = {'uuid': endpoint_uuid, 'type': endpoint_type, 'sample_types': sample_types} + return endpoint_resource_key, endpoint_resource_value diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py index 9bd2e75ac..e831d7738 100644 --- a/src/device/service/drivers/openconfig/templates/EndPoints.py +++ b/src/device/service/drivers/openconfig/templates/EndPoints.py @@ -14,7 +14,7 @@ import logging, lxml.etree as ET from typing import Any, Dict, List, Tuple -from device.service.database.KpiSampleType import ORM_KpiSampleTypeEnum +from common.proto.kpi_sample_types_pb2 import KpiSampleType from .Namespace import NAMESPACES from .Tools import add_value_from_collection, add_value_from_tag @@ -47,10 +47,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: if 'type' not in endpoint: endpoint['type'] = '-' sample_types = { - ORM_KpiSampleTypeEnum.BYTES_RECEIVED.value : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-octets' ), - ORM_KpiSampleTypeEnum.BYTES_TRANSMITTED.value : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-octets'), - ORM_KpiSampleTypeEnum.PACKETS_RECEIVED.value : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-pkts' ), - ORM_KpiSampleTypeEnum.PACKETS_TRANSMITTED.value: XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-pkts' ), + KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-octets' ), + KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-octets'), + KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-pkts' ), + KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED: XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-pkts' ), } add_value_from_collection(endpoint, 'sample_types', sample_types) diff --git a/src/device/service/monitoring/MonitoringLoop.py b/src/device/service/monitoring/MonitoringLoop.py new file mode 100644 index 000000000..ec17a3ef6 --- /dev/null +++ b/src/device/service/monitoring/MonitoringLoop.py @@ -0,0 +1,43 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import queue, threading +from device.service.driver_api._Driver import _Driver + +class MonitoringLoop: + def __init__(self, device_uuid : str, driver : _Driver, samples_queue : queue.Queue) -> None: + self._device_uuid = device_uuid + self._driver = driver + self._samples_queue = samples_queue + self._running = threading.Event() + self._terminate = threading.Event() + self._samples_stream = self._driver.GetState(blocking=True, terminate=self._terminate) + self._collector_thread = threading.Thread(target=self._collect, daemon=True) + + def _collect(self) -> None: + for sample in self._samples_stream: + if self._terminate.is_set(): break + sample = (self._device_uuid, *sample) + self._samples_queue.put_nowait(sample) + + def start(self): + self._collector_thread.start() + self._running.set() + + @property + def is_running(self): return self._running.is_set() + + def stop(self): + self._terminate.set() + self._collector_thread.join() diff --git a/src/device/service/monitoring/MonitoringLoops.py b/src/device/service/monitoring/MonitoringLoops.py new file mode 100644 index 000000000..5763951fb --- /dev/null +++ b/src/device/service/monitoring/MonitoringLoops.py @@ -0,0 +1,170 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, queue, threading +from typing import Dict, Optional, Tuple, Union +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.proto.monitoring_pb2 import Kpi +from monitoring.client.MonitoringClient import MonitoringClient +from ..driver_api._Driver import _Driver +from .MonitoringLoop import MonitoringLoop + +LOGGER = logging.getLogger(__name__) + +QUEUE_GET_WAIT_TIMEOUT = 0.5 + +def value_to_grpc(value : Union[bool, float, int, str]) -> Dict: + if isinstance(value, int): + kpi_value_field_name = 'int64Val' + kpi_value_field_cast = int + elif isinstance(value, float): + kpi_value_field_name = 'floatVal' + kpi_value_field_cast = float + elif isinstance(value, bool): + kpi_value_field_name = 'boolVal' + kpi_value_field_cast = bool + else: + kpi_value_field_name = 'stringVal' + kpi_value_field_cast = str + + return {kpi_value_field_name: kpi_value_field_cast(value)} + +TYPE_TARGET_KEY = Tuple[str, str] # (device_uuid, monitoring_resource_key) +TYPE_TARGET_KPI = Tuple[str, float, float] # (kpi_uuid, sampling_duration, sampling_interval) +TYPE_KPI_DETAIL = Tuple[str, str, float, float] # (device_uuid, monitoring_resource_key, + # sampling_duration, sampling_interval) + +class MonitoringLoops: + def __init__(self) -> None: + self._monitoring_client = MonitoringClient() + self._samples_queue = queue.Queue() + self._running = threading.Event() + self._terminate = threading.Event() + + self._lock_device_endpoint = threading.Lock() + self._device_endpoint_sampletype__to__resource_key : Dict[Tuple[str, str, int], str] = dict() + + self._lock_monitoring_loop = threading.Lock() + self._device_uuid__to__monitoring_loop : Dict[str, MonitoringLoop] = dict() + + self._lock_kpis = threading.Lock() + self._target_to_kpi : Dict[TYPE_TARGET_KEY, TYPE_TARGET_KPI] = dict() + self._kpi_to_detail : Dict[str, TYPE_KPI_DETAIL] = dict() + + self._exporter_thread = threading.Thread(target=self._export, daemon=True) + + def add_device(self, device_uuid : str, driver : _Driver) -> None: + with self._lock_monitoring_loop: + monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid) + if (monitoring_loop is not None) and monitoring_loop.is_running: return + monitoring_loop = MonitoringLoop(device_uuid, driver, self._samples_queue) + self._device_uuid__to__monitoring_loop[device_uuid] = monitoring_loop + monitoring_loop.start() + + def remove_device(self, device_uuid : str) -> None: + with self._lock_monitoring_loop: + monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid) + if monitoring_loop is None: return + if monitoring_loop.is_running: monitoring_loop.stop() + self._device_uuid__to__monitoring_loop.pop(device_uuid, None) + + def add_resource_key( + self, device_uuid : str, endpoint_uuid : str, kpi_sample_type : KpiSampleType, resource_key : str + ) -> None: + with self._lock_device_endpoint: + key = (device_uuid, endpoint_uuid, kpi_sample_type) + self._device_endpoint_sampletype__to__resource_key[key] = resource_key + + def get_resource_key( + self, device_uuid : str, endpoint_uuid : str, kpi_sample_type : KpiSampleType + ) -> Optional[str]: + with self._lock_device_endpoint: + key = (device_uuid, endpoint_uuid, kpi_sample_type) + return self._device_endpoint_sampletype__to__resource_key.get(key) + + def remove_resource_key( + self, device_uuid : str, endpoint_uuid : str, kpi_sample_type : KpiSampleType + ) -> None: + with self._lock_device_endpoint: + key = (device_uuid, endpoint_uuid, kpi_sample_type) + self._device_endpoint_sampletype__to__resource_key.pop(key, None) + + def add_kpi( + self, device_uuid : str, monitoring_resource_key : str, kpi_uuid : str, sampling_duration : float, + sampling_interval : float + ) -> None: + with self._lock_kpis: + kpi_key = (device_uuid, monitoring_resource_key) + kpi_values = (kpi_uuid, sampling_duration, sampling_interval) + self._target_to_kpi[kpi_key] = kpi_values + + kpi_details = (device_uuid, monitoring_resource_key, sampling_duration, sampling_interval) + self._kpi_to_detail[kpi_uuid] = kpi_details + + def get_kpi_by_uuid(self, kpi_uuid : str) -> Optional[TYPE_KPI_DETAIL]: + with self._lock_kpis: + return self._kpi_to_detail.get(kpi_uuid) + + def get_kpi_by_metric( + self, device_uuid : str, monitoring_resource_key : str + ) -> Optional[TYPE_TARGET_KPI]: + with self._lock_kpis: + kpi_key = (device_uuid, monitoring_resource_key) + return self._target_to_kpi.get(kpi_key) + + def remove_kpi(self, kpi_uuid : str) -> None: + with self._lock_kpis: + kpi_details = self._kpi_to_detail.pop(kpi_uuid, None) + if kpi_details is None: return + kpi_key = kpi_details[0:2] # (device_uuid, monitoring_resource_key, _, _) + self._target_to_kpi.pop(kpi_key, None) + + def start(self): + self._exporter_thread.start() + + @property + def is_running(self): return self._running.is_set() + + def stop(self): + self._terminate.set() + self._exporter_thread.join() + + def _export(self) -> None: + self._running.set() + while not self._terminate.is_set(): + try: + sample = self._samples_queue.get(block=True, timeout=QUEUE_GET_WAIT_TIMEOUT) + #LOGGER.debug('[MonitoringLoops:_export] sample={:s}'.format(str(sample))) + except queue.Empty: + continue + + device_uuid, timestamp, monitoring_resource_key, value = sample + + kpi_details = self.get_kpi_by_metric(device_uuid, monitoring_resource_key) + if kpi_details is None: + MSG = 'Kpi for Device({:s})/MonitoringResourceKey({:s}) not found' + LOGGER.warning(MSG.format(str(device_uuid), str(monitoring_resource_key))) + continue + kpi_uuid,_,_ = kpi_details + + try: + self._monitoring_client.IncludeKpi(Kpi(**{ + 'kpi_id' : {'kpi_id': {'uuid': kpi_uuid}}, + 'timestamp': {'timestamp': timestamp}, + 'kpi_value': value_to_grpc(value), + })) + except: # pylint: disable=bare-except + LOGGER.exception('Unable to format/send Kpi') + + self._running.clear() diff --git a/src/device/service/database/__init__.py b/src/device/service/monitoring/__init__.py similarity index 75% rename from src/device/service/database/__init__.py rename to src/device/service/monitoring/__init__.py index c59423e79..70a332512 100644 --- a/src/device/service/database/__init__.py +++ b/src/device/service/monitoring/__init__.py @@ -12,5 +12,3 @@ # See the License for the specific language governing permissions and # limitations under the License. -# In-Memory database with a simplified representation of Context Database focused on the Device model. -# Used as an internal configuration cache, for message validation, and message formatting purposes. -- GitLab From bc1ba71b06d70aa0e58f0cc6ad19c2427a399f8f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 13:29:49 +0000 Subject: [PATCH 107/158] Device component: - corrected device driver selectors --- src/device/service/drivers/__init__.py | 27 +++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index 4e4a9ac11..bde5c93a5 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -14,7 +14,8 @@ import os from common.DeviceTypes import DeviceTypeEnum -from ..driver_api.FilterFields import FilterFieldEnum, ORM_DeviceDriverEnum +from common.proto.context_pb2 import DeviceDriverEnum +from ..driver_api.FilterFields import FilterFieldEnum TRUE_VALUES = {'T', 'TRUE', 'YES', '1'} DEVICE_EMULATED_ONLY = os.environ.get('DEVICE_EMULATED_ONLY') @@ -47,7 +48,7 @@ DRIVERS.append( #DeviceTypeEnum.PACKET_SWITCH, ], FilterFieldEnum.DRIVER: [ - ORM_DeviceDriverEnum.UNDEFINED, + DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, ], }, #{ @@ -63,12 +64,12 @@ DRIVERS.append( # DeviceTypeEnum.EMULATED_PACKET_SWITCH, # ], # FilterFieldEnum.DRIVER: [ - # ORM_DeviceDriverEnum.UNDEFINED, - # ORM_DeviceDriverEnum.OPENCONFIG, - # ORM_DeviceDriverEnum.TRANSPORT_API, - # ORM_DeviceDriverEnum.P4, - # ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, - # ORM_DeviceDriverEnum.ONF_TR_352, + # DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, + # DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, + # DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, + # DeviceDriverEnum.DEVICEDRIVER_P4, + # DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, + # DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352, # ], #} ])) @@ -80,7 +81,7 @@ if LOAD_ALL_DEVICE_DRIVERS: { # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.OPENCONFIG, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, } ])) @@ -91,7 +92,7 @@ if LOAD_ALL_DEVICE_DRIVERS: { # Real OLS, specifying TAPI Driver => use TransportApiDriver FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.TRANSPORT_API, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, } ])) @@ -102,7 +103,7 @@ if LOAD_ALL_DEVICE_DRIVERS: { # Real P4 Switch, specifying P4 Driver => use P4Driver FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.P4, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_P4, } ])) @@ -112,7 +113,7 @@ if LOAD_ALL_DEVICE_DRIVERS: (IETFApiDriver, [ { FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, } ])) @@ -123,6 +124,6 @@ if LOAD_ALL_DEVICE_DRIVERS: { # Close enough, it does optical switching FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.XR_CONSTELLATION, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.XR, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_XR, } ])) -- GitLab From bba2fd04a646078ca77b6d6060e47392e7a008ed Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 14:23:26 +0000 Subject: [PATCH 108/158] WebUI component: - corrected report of configuration rules in device - minor cosmetic code changes --- src/webui/service/__init__.py | 41 ++++++++++++------- .../service/templates/device/detail.html | 10 ++++- 2 files changed, 34 insertions(+), 17 deletions(-) diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index 84a43d370..d157a4f0e 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. import json +from typing import List, Tuple, Union from flask import Flask, request, session from flask_healthz import healthz, HealthError from context.client.ContextClient import ContextClient @@ -36,10 +37,20 @@ def readiness(): device_client.connect() device_client.close() except Exception as e: - raise HealthError('Can\'t connect with the service: ' + e.details()) + raise HealthError("Can't connect with the service: {:s}".format(str(e))) from e -def from_json(json_str): - return json.loads(json_str) +def json_to_list(json_str : str) -> List[Union[str, Tuple[str, str]]]: + try: + data = json.loads(json_str) + except: # pylint: disable=bare-except + return [('item', str(json_str))] + + if isinstance(data, dict): + return [('kv', key, value) for key, value in data.items()] + elif isinstance(data, list): + return [('item', item) for item in data] + else: + return [('item', str(data))] class SetSubAppMiddleware(): def __init__(self, app, web_app_root): @@ -63,32 +74,32 @@ def create_app(use_config=None, web_app_root=None): app.register_blueprint(healthz, url_prefix='/healthz') - from webui.service.js.routes import js + from webui.service.js.routes import js # pylint: disable=import-outside-toplevel app.register_blueprint(js) - from webui.service.main.routes import main + from webui.service.main.routes import main # pylint: disable=import-outside-toplevel app.register_blueprint(main) - from webui.service.load_gen.routes import load_gen + from webui.service.load_gen.routes import load_gen # pylint: disable=import-outside-toplevel app.register_blueprint(load_gen) - from webui.service.service.routes import service + from webui.service.service.routes import service # pylint: disable=import-outside-toplevel app.register_blueprint(service) - from webui.service.slice.routes import slice + from webui.service.slice.routes import slice # pylint: disable=import-outside-toplevel,redefined-builtin app.register_blueprint(slice) - from webui.service.device.routes import device + from webui.service.device.routes import device # pylint: disable=import-outside-toplevel app.register_blueprint(device) - from webui.service.link.routes import link + from webui.service.link.routes import link # pylint: disable=import-outside-toplevel app.register_blueprint(link) - - app.jinja_env.filters['from_json'] = from_json - - app.jinja_env.globals.update(get_working_context=get_working_context) - app.jinja_env.globals.update(get_working_topology=get_working_topology) + app.jinja_env.globals.update({ # pylint: disable=no-member + 'json_to_list' : json_to_list, + 'get_working_context' : get_working_context, + 'get_working_topology': get_working_topology, + }) if web_app_root is not None: app.wsgi_app = SetSubAppMiddleware(app.wsgi_app, web_app_root) diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index db1d9a8ef..b0f2b560e 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -103,8 +103,14 @@ </td> <td> <ul> - {% for key, value in (config.custom.resource_value | from_json).items() %} - <li><b>{{ key }}:</b> {{ value }}</li> + {% for item_type, item in json_to_list(config.custom.resource_value) %} + {% if item_type == 'item' %} + <li>{{ ', '.join(item[1:]) }}</li> + {% elif item_type == 'kv' %} + <li><b>{{ item[1] }}:</b> {{ item[2] }}</li> + {% else %} + <li>{{ item }}</li> + {% endif %} {% endfor %} </ul> </td> -- GitLab From 708d817703f86a6b9828709a207e850e17b16c18 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 14:38:46 +0000 Subject: [PATCH 109/158] WebUI component: - corrected report of configuration rules in device --- src/webui/service/__init__.py | 2 +- src/webui/service/templates/device/detail.html | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index d157a4f0e..fdae45c63 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -46,7 +46,7 @@ def json_to_list(json_str : str) -> List[Union[str, Tuple[str, str]]]: return [('item', str(json_str))] if isinstance(data, dict): - return [('kv', key, value) for key, value in data.items()] + return [('kv', (key, value)) for key, value in data.items()] elif isinstance(data, list): return [('item', item) for item in data] else: diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index b0f2b560e..04a3423cb 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -107,7 +107,7 @@ {% if item_type == 'item' %} <li>{{ ', '.join(item[1:]) }}</li> {% elif item_type == 'kv' %} - <li><b>{{ item[1] }}:</b> {{ item[2] }}</li> + <li><b>{{ item[0] }}:</b> {{ item[1] }}</li> {% else %} <li>{{ item }}</li> {% endif %} -- GitLab From b3bf3cf174a8cdc62c29d548460934b25b3c6534 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 14:45:45 +0000 Subject: [PATCH 110/158] WebUI component: - corrected report of configuration rules in device --- src/webui/service/__init__.py | 2 +- src/webui/service/templates/device/detail.html | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index fdae45c63..94bc91429 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -48,7 +48,7 @@ def json_to_list(json_str : str) -> List[Union[str, Tuple[str, str]]]: if isinstance(data, dict): return [('kv', (key, value)) for key, value in data.items()] elif isinstance(data, list): - return [('item', item) for item in data] + return [('item', ', '.join(data))] else: return [('item', str(data))] diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index 04a3423cb..adf503952 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -104,9 +104,7 @@ <td> <ul> {% for item_type, item in json_to_list(config.custom.resource_value) %} - {% if item_type == 'item' %} - <li>{{ ', '.join(item[1:]) }}</li> - {% elif item_type == 'kv' %} + {% if item_type == 'kv' %} <li><b>{{ item[0] }}:</b> {{ item[1] }}</li> {% else %} <li>{{ item }}</li> -- GitLab From 385de9ac7cc9df7905d416135577529eebcb9cec Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 15:24:23 +0000 Subject: [PATCH 111/158] Device component: - corrected populate_endpoints method --- src/device/service/Tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 086e5a071..0698be883 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -102,8 +102,8 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon endpoint_uuid = resource_value.get('uuid') device_endpoint = device.device_endpoints.add() - device_endpoint.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME - device_endpoint.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME + device_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + device_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME device_endpoint.endpoint_id.device_id.device_uuid.uuid = device_uuid device_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid device_endpoint.endpoint_type = resource_value.get('type') -- GitLab From b099f36f2477deebaa75aa87cd060098726e2f95 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 15:24:44 +0000 Subject: [PATCH 112/158] WebUI component: - updated dump of config rules in service and slice entities --- src/webui/service/templates/service/detail.html | 8 ++++++-- src/webui/service/templates/slice/detail.html | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index e1f963e42..9167f0016 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -159,8 +159,12 @@ </td> <td> <ul> - {% for key, value in (config.custom.resource_value | from_json).items() %} - <li><b>{{ key }}:</b> {{ value }}</li> + {% for item_type, item in json_to_list(config.custom.resource_value) %} + {% if item_type == 'kv' %} + <li><b>{{ item[0] }}:</b> {{ item[1] }}</li> + {% else %} + <li>{{ item }}</li> + {% endif %} {% endfor %} </ul> </td> diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 889e10ce5..9bd4eb0d9 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -160,8 +160,12 @@ </td> <td> <ul> - {% for key, value in (config.custom.resource_value | from_json).items() %} - <li><b>{{ key }}:</b> {{ value }}</li> + {% for item_type, item in json_to_list(config.custom.resource_value) %} + {% if item_type == 'kv' %} + <li><b>{{ item[0] }}:</b> {{ item[1] }}</li> + {% else %} + <li>{{ item }}</li> + {% endif %} {% endfor %} </ul> </td> -- GitLab From ae9705af90484815b299b069c80b40b12dd79e8f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 16:24:27 +0000 Subject: [PATCH 113/158] Common - MockServicers: - added missing log messages in MockContext --- src/common/tests/MockServicerImpl_Context.py | 140 ++++++++++++++----- 1 file changed, 105 insertions(+), 35 deletions(-) diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py index 27ff45fc5..f81a18135 100644 --- a/src/common/tests/MockServicerImpl_Context.py +++ b/src/common/tests/MockServicerImpl_Context.py @@ -103,23 +103,33 @@ class MockServicerImpl_Context(ContextServiceServicer): def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList: LOGGER.info('[ListContextIds] request={:s}'.format(grpc_message_to_json_string(request))) - return ContextIdList(context_ids=[context.context_id for context in get_entries(self.database, 'context')]) + reply = ContextIdList(context_ids=[context.context_id for context in get_entries(self.database, 'context')]) + LOGGER.info('[ListContextIds] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList: LOGGER.info('[ListContexts] request={:s}'.format(grpc_message_to_json_string(request))) - return ContextList(contexts=get_entries(self.database, 'context')) + reply = ContextList(contexts=get_entries(self.database, 'context')) + LOGGER.info('[ListContexts] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context: LOGGER.info('[GetContext] request={:s}'.format(grpc_message_to_json_string(request))) - return get_entry(context, self.database, 'context', request.context_uuid.uuid) + reply = get_entry(context, self.database, 'context', request.context_uuid.uuid) + LOGGER.info('[GetContext] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: LOGGER.info('[SetContext] request={:s}'.format(grpc_message_to_json_string(request))) - return self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT) + reply = self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT) + LOGGER.info('[SetContext] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveContext] request={:s}'.format(grpc_message_to_json_string(request))) - return self._del(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT, context) + reply = self._del(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT, context) + LOGGER.info('[RemoveContext] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]: LOGGER.info('[GetContextEvents] request={:s}'.format(grpc_message_to_json_string(request))) @@ -131,29 +141,39 @@ class MockServicerImpl_Context(ContextServiceServicer): def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList: LOGGER.info('[ListTopologyIds] request={:s}'.format(grpc_message_to_json_string(request))) topologies = get_entries(self.database, 'topology[{:s}]'.format(str(request.context_uuid.uuid))) - return TopologyIdList(topology_ids=[topology.topology_id for topology in topologies]) + reply = TopologyIdList(topology_ids=[topology.topology_id for topology in topologies]) + LOGGER.info('[ListTopologyIds] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList: LOGGER.info('[ListTopologies] request={:s}'.format(grpc_message_to_json_string(request))) topologies = get_entries(self.database, 'topology[{:s}]'.format(str(request.context_uuid.uuid))) - return TopologyList(topologies=[topology for topology in topologies]) + reply = TopologyList(topologies=[topology for topology in topologies]) + LOGGER.info('[ListTopologies] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology: LOGGER.info('[GetTopology] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid)) - return get_entry(context, self.database, container_name, request.topology_uuid.uuid) + reply = get_entry(context, self.database, container_name, request.topology_uuid.uuid) + LOGGER.info('[GetTopology] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId: LOGGER.info('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'topology[{:s}]'.format(str(request.topology_id.context_id.context_uuid.uuid)) topology_uuid = request.topology_id.topology_uuid.uuid - return self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY) + reply = self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY) + LOGGER.info('[SetTopology] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveTopology] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid)) topology_uuid = request.topology_uuid.uuid - return self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context) + reply = self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context) + LOGGER.info('[RemoveTopology] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]: LOGGER.info('[GetTopologyEvents] request={:s}'.format(grpc_message_to_json_string(request))) @@ -164,23 +184,33 @@ class MockServicerImpl_Context(ContextServiceServicer): def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList: LOGGER.info('[ListDeviceIds] request={:s}'.format(grpc_message_to_json_string(request))) - return DeviceIdList(device_ids=[device.device_id for device in get_entries(self.database, 'device')]) + reply = DeviceIdList(device_ids=[device.device_id for device in get_entries(self.database, 'device')]) + LOGGER.info('[ListDeviceIds] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList: LOGGER.info('[ListDevices] request={:s}'.format(grpc_message_to_json_string(request))) - return DeviceList(devices=get_entries(self.database, 'device')) + reply = DeviceList(devices=get_entries(self.database, 'device')) + LOGGER.info('[ListDevices] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device: LOGGER.info('[GetDevice] request={:s}'.format(grpc_message_to_json_string(request))) - return get_entry(context, self.database, 'device', request.device_uuid.uuid) + reply = get_entry(context, self.database, 'device', request.device_uuid.uuid) + LOGGER.info('[GetDevice] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def SetDevice(self, request: Context, context : grpc.ServicerContext) -> DeviceId: LOGGER.info('[SetDevice] request={:s}'.format(grpc_message_to_json_string(request))) - return self._set(request, 'device', request.device_id.device_uuid.uuid, 'device_id', TOPIC_DEVICE) + reply = self._set(request, 'device', request.device_id.device_uuid.uuid, 'device_id', TOPIC_DEVICE) + LOGGER.info('[SetDevice] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveDevice] request={:s}'.format(grpc_message_to_json_string(request))) - return self._del(request, 'device', request.device_uuid.uuid, 'device_id', TOPIC_DEVICE, context) + reply = self._del(request, 'device', request.device_uuid.uuid, 'device_id', TOPIC_DEVICE, context) + LOGGER.info('[RemoveDevice] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]: LOGGER.info('[GetDeviceEvents] request={:s}'.format(grpc_message_to_json_string(request))) @@ -191,23 +221,33 @@ class MockServicerImpl_Context(ContextServiceServicer): def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList: LOGGER.info('[ListLinkIds] request={:s}'.format(grpc_message_to_json_string(request))) - return LinkIdList(link_ids=[link.link_id for link in get_entries(self.database, 'link')]) + reply = LinkIdList(link_ids=[link.link_id for link in get_entries(self.database, 'link')]) + LOGGER.info('[ListLinkIds] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList: LOGGER.info('[ListLinks] request={:s}'.format(grpc_message_to_json_string(request))) - return LinkList(links=get_entries(self.database, 'link')) + reply = LinkList(links=get_entries(self.database, 'link')) + LOGGER.info('[ListLinks] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link: LOGGER.info('[GetLink] request={:s}'.format(grpc_message_to_json_string(request))) - return get_entry(context, self.database, 'link', request.link_uuid.uuid) + reply = get_entry(context, self.database, 'link', request.link_uuid.uuid) + LOGGER.info('[GetLink] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def SetLink(self, request: Context, context : grpc.ServicerContext) -> LinkId: LOGGER.info('[SetLink] request={:s}'.format(grpc_message_to_json_string(request))) - return self._set(request, 'link', request.link_id.link_uuid.uuid, 'link_id', TOPIC_LINK) + reply = self._set(request, 'link', request.link_id.link_uuid.uuid, 'link_id', TOPIC_LINK) + LOGGER.info('[SetLink] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveLink] request={:s}'.format(grpc_message_to_json_string(request))) - return self._del(request, 'link', request.link_uuid.uuid, 'link_id', TOPIC_LINK, context) + reply = self._del(request, 'link', request.link_uuid.uuid, 'link_id', TOPIC_LINK, context) + LOGGER.info('[RemoveLink] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]: LOGGER.info('[GetLinkEvents] request={:s}'.format(grpc_message_to_json_string(request))) @@ -219,29 +259,39 @@ class MockServicerImpl_Context(ContextServiceServicer): def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList: LOGGER.info('[ListSliceIds] request={:s}'.format(grpc_message_to_json_string(request))) slices = get_entries(self.database, 'slice[{:s}]'.format(str(request.context_uuid.uuid))) - return SliceIdList(slice_ids=[slice.slice_id for slice in slices]) + reply = SliceIdList(slice_ids=[slice.slice_id for slice in slices]) + LOGGER.info('[ListSliceIds] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList: LOGGER.info('[ListSlices] request={:s}'.format(grpc_message_to_json_string(request))) slices = get_entries(self.database, 'slice[{:s}]'.format(str(request.context_uuid.uuid))) - return SliceList(slices=[slice for slice in slices]) + reply = SliceList(slices=[slice for slice in slices]) + LOGGER.info('[ListSlices] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice: LOGGER.info('[GetSlice] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid)) - return get_entry(context, self.database, container_name, request.slice_uuid.uuid) + reply = get_entry(context, self.database, container_name, request.slice_uuid.uuid) + LOGGER.info('[GetSlice] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: LOGGER.info('[SetSlice] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'slice[{:s}]'.format(str(request.slice_id.context_id.context_uuid.uuid)) slice_uuid = request.slice_id.slice_uuid.uuid - return self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE) + reply = self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE) + LOGGER.info('[SetSlice] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveSlice] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid)) slice_uuid = request.slice_uuid.uuid - return self._del(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE, context) + reply = self._del(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE, context) + LOGGER.info('[RemoveSlice] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: LOGGER.info('[GetSliceEvents] request={:s}'.format(grpc_message_to_json_string(request))) @@ -253,29 +303,39 @@ class MockServicerImpl_Context(ContextServiceServicer): def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList: LOGGER.info('[ListServiceIds] request={:s}'.format(grpc_message_to_json_string(request))) services = get_entries(self.database, 'service[{:s}]'.format(str(request.context_uuid.uuid))) - return ServiceIdList(service_ids=[service.service_id for service in services]) + reply = ServiceIdList(service_ids=[service.service_id for service in services]) + LOGGER.info('[ListServiceIds] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList: LOGGER.info('[ListServices] request={:s}'.format(grpc_message_to_json_string(request))) services = get_entries(self.database, 'service[{:s}]'.format(str(request.context_uuid.uuid))) - return ServiceList(services=[service for service in services]) + reply = ServiceList(services=[service for service in services]) + LOGGER.info('[ListServices] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service: LOGGER.info('[GetService] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid)) - return get_entry(context, self.database, container_name, request.service_uuid.uuid) + reply = get_entry(context, self.database, container_name, request.service_uuid.uuid) + LOGGER.info('[GetService] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId: LOGGER.info('[SetService] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid)) service_uuid = request.service_id.service_uuid.uuid - return self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE) + reply = self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE) + LOGGER.info('[SetService] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid)) service_uuid = request.service_uuid.uuid - return self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context) + reply = self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context) + LOGGER.info('[RemoveService] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: LOGGER.info('[GetServiceEvents] request={:s}'.format(grpc_message_to_json_string(request))) @@ -288,17 +348,23 @@ class MockServicerImpl_Context(ContextServiceServicer): LOGGER.info('[ListConnectionIds] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'service_connections[{:s}/{:s}]'.format( str(request.context_id.context_uuid.uuid), str(request.service_uuid.uuid)) - return ConnectionIdList(connection_ids=[c.connection_id for c in get_entries(self.database, container_name)]) + reply = ConnectionIdList(connection_ids=[c.connection_id for c in get_entries(self.database, container_name)]) + LOGGER.info('[ListConnectionIds] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def ListConnections(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionList: LOGGER.info('[ListConnections] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'service_connections[{:s}/{:s}]'.format( str(request.context_id.context_uuid.uuid), str(request.service_uuid.uuid)) - return ConnectionList(connections=get_entries(self.database, container_name)) + reply = ConnectionList(connections=get_entries(self.database, container_name)) + LOGGER.info('[ListConnections] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection: LOGGER.info('[GetConnection] request={:s}'.format(grpc_message_to_json_string(request))) - return get_entry(context, self.database, 'connection', request.connection_uuid.uuid) + reply = get_entry(context, self.database, 'connection', request.connection_uuid.uuid) + LOGGER.info('[GetConnection] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId: LOGGER.info('[SetConnection] request={:s}'.format(grpc_message_to_json_string(request))) @@ -306,7 +372,9 @@ class MockServicerImpl_Context(ContextServiceServicer): str(request.service_id.context_id.context_uuid.uuid), str(request.service_id.service_uuid.uuid)) connection_uuid = request.connection_id.connection_uuid.uuid set_entry(self.database, container_name, connection_uuid, request) - return self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION) + reply = self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION) + LOGGER.info('[SetConnection] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveConnection] request={:s}'.format(grpc_message_to_json_string(request))) @@ -315,7 +383,9 @@ class MockServicerImpl_Context(ContextServiceServicer): str(connection.service_id.context_id.context_uuid.uuid), str(connection.service_id.service_uuid.uuid)) connection_uuid = request.connection_uuid.uuid del_entry(context, self.database, container_name, connection_uuid) - return self._del(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION, context) + reply = self._del(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION, context) + LOGGER.info('[RemoveConnection] reply={:s}'.format(grpc_message_to_json_string(reply))) + return reply def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: LOGGER.info('[GetConnectionEvents] request={:s}'.format(grpc_message_to_json_string(request))) -- GitLab From 11731ae4b4706d6e738bb7dd40b2d0f6dc49d004 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 16:27:55 +0000 Subject: [PATCH 114/158] Device component: - aggregated error messages in separate file - corrected update of operational status - corrected update of resulting device configuration from device instead of composing from rules (for simplicity) - corrected retrieval of device_uuid in MonitorDeviceKpi - factorized code to compose gRPC device_config rules from raw config rules - code cleanup - added missing logs in unitary test for emulated device driver --- .../service/DeviceServiceServicerImpl.py | 29 +++-- src/device/service/Errors.py | 30 +++++ src/device/service/Tools.py | 113 +++++++----------- src/device/tests/Device_Emulated.py | 2 +- src/device/tests/test_unitary_emulated.py | 3 + 5 files changed, 92 insertions(+), 85 deletions(-) create mode 100644 src/device/service/Errors.py diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 9d0f9bd3e..628b0884f 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -15,12 +15,13 @@ import grpc, logging from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.method_wrappers.ServiceExceptions import NotFoundException, OperationFailedException -from common.proto.context_pb2 import Device, DeviceConfig, DeviceId, Empty +from common.proto.context_pb2 import Device, DeviceConfig, DeviceId, DeviceOperationalStatusEnum, Empty from common.proto.device_pb2 import MonitoringSettings from common.proto.device_pb2_grpc import DeviceServiceServicer from common.tools.context_queries.Device import get_device from common.tools.mutex_queues.MutexQueues import MutexQueues from context.client.ContextClient import ContextClient +from device.service.Errors import ERROR_MISSING_DRIVER, ERROR_MISSING_KPI from .driver_api._Driver import _Driver from .driver_api.DriverInstanceCache import DriverInstanceCache, get_driver from .monitoring.MonitoringLoops import MonitoringLoops @@ -32,8 +33,6 @@ LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('Device', 'RPC') -ERROR_MISSING_DRIVER = 'Device({:s}) has not been added to this Device instance' - class DeviceServiceServicerImpl(DeviceServiceServicer): def __init__(self, driver_instance_cache : DriverInstanceCache, monitoring_loops : MonitoringLoops) -> None: LOGGER.debug('Creating Servicer...') @@ -97,6 +96,9 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): msg = ERROR_MISSING_DRIVER.format(str(device_uuid)) raise OperationFailedException('ConfigureDevice', extra_details=msg) + if request.device_operational_status != DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED: + device.device_operational_status = request.device_operational_status + # TODO: use of datastores (might be virtual ones) to enable rollbacks resources_to_set, resources_to_delete = compute_rules_to_add_delete(device, request) @@ -110,13 +112,8 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): # Rules updated by configure_rules() and deconfigure_rules() methods. # Code to be removed soon if not needed. - #running_config_rules = driver.GetConfig() - #for config_rule in running_config_rules: - # if isinstance(config_rule[1], Exception): continue - # config_rule = device.device_config.config_rules.add() - # config_rule.action = ConfigActionEnum.CONFIGACTION_SET - # config_rule.custom.resource_key = config_rule[0] - # config_rule.custom.resource_value = json.dumps(config_rule[1], sort_keys=True) + del device.device_config.config_rules[:] + populate_config_rules(device, driver) device_id = context_client.SetDevice(device) return device_id @@ -161,10 +158,20 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def MonitorDeviceKpi(self, request : MonitoringSettings, context : grpc.ServicerContext) -> Empty: - device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0) manage_kpi_method = subscribe_kpi if subscribe else unsubscribe_kpi + if subscribe: + device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid + else: + # unsubscribe only carries kpi_uuid; take device_uuid from recorded KPIs + kpi_uuid = request.kpi_id.kpi_id.uuid + kpi_details = self.monitoring_loops.get_kpi_by_uuid(kpi_uuid) + if kpi_details is None: + msg = ERROR_MISSING_KPI.format(str(kpi_uuid)) + raise OperationFailedException('MonitorDeviceKpi', extra_details=msg) + device_uuid = kpi_details[0] + self.mutex_queues.wait_my_turn(device_uuid) try: driver : _Driver = self.driver_instance_cache.get(device_uuid) diff --git a/src/device/service/Errors.py b/src/device/service/Errors.py new file mode 100644 index 000000000..5f2fc4996 --- /dev/null +++ b/src/device/service/Errors.py @@ -0,0 +1,30 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ERROR_MISSING_DRIVER = 'Device({:s}) has not been added to this Device instance' +ERROR_MISSING_KPI = 'Kpi({:s}) not found' + +ERROR_BAD_ENDPOINT = 'Device({:s}): GetConfig retrieved malformed Endpoint({:s})' + +ERROR_GET = 'Device({:s}): Unable to Get resource(key={:s}); error({:s})' +ERROR_GET_INIT = 'Device({:s}): Unable to Get Initial resource(key={:s}); error({:s})' +ERROR_DELETE = 'Device({:s}): Unable to Delete resource(key={:s}, value={:s}); error({:s})' +ERROR_SET = 'Device({:s}): Unable to Set resource(key={:s}, value={:s}); error({:s})' + +ERROR_SAMPLETYPE = 'Device({:s})/EndPoint({:s}): SampleType({:s}/{:s}) not supported' + +ERROR_SUBSCRIBE = 'Device({:s}): Unable to Subscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\ + 'error({:s})' +ERROR_UNSUBSCRIBE = 'Device({:s}): Unable to Unsubscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\ + 'error({:s})' diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 0698be883..d2cd0b481 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -13,7 +13,7 @@ # limitations under the License. import json -from typing import Any, Dict, List, Tuple +from typing import Any, Dict, List, Tuple, Union from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.method_wrappers.ServiceExceptions import InvalidArgumentException from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig @@ -22,18 +22,9 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.grpc.Tools import grpc_message_to_json from .driver_api._Driver import _Driver, RESOURCE_ENDPOINTS from .monitoring.MonitoringLoops import MonitoringLoops - -ERROR_ENDPOINT = 'Device({:s}): GetConfig retrieved malformed Endpoint({:s})' -ERROR_GET = 'Device({:s}): Unable to Get resource(key={:s}); error({:s})' -ERROR_GET_INIT = 'Device({:s}): Unable to Get Initial resource(key={:s}); error({:s})' -ERROR_SET = 'Device({:s}): Unable to Set resource(key={:s}, value={:s}); error({:s})' -ERROR_DELETE = 'Device({:s}): Unable to Delete resource(key={:s}, value={:s}); error({:s})' -ERROR_SAMPLETYPE = 'Device({:s})/EndPoint({:s}): SampleType({:s}/{:s}) not supported' -ERROR_SUBSCRIBE = 'Device({:s}): Unable to Subscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\ - 'error({:s})' -ERROR_MISSING_KPI = 'Device({:s}): Kpi({:s}) not found' -ERROR_UNSUBSCRIBE = 'Device({:s}): Unable to Unsubscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\ - 'error({:s})' +from .Errors import ( + ERROR_BAD_ENDPOINT, ERROR_DELETE, ERROR_GET, ERROR_GET_INIT, ERROR_MISSING_KPI, ERROR_SAMPLETYPE, ERROR_SET, + ERROR_SUBSCRIBE, ERROR_UNSUBSCRIBE) def check_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]: connection_config_rules = dict() @@ -91,7 +82,7 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon errors : List[str] = list() for endpoint in results_getconfig: if len(endpoint) != 2: - errors.append(ERROR_ENDPOINT.format(device_uuid, str(endpoint))) + errors.append(ERROR_BAD_ENDPOINT.format(device_uuid, str(endpoint))) continue resource_key, resource_value = endpoint @@ -115,40 +106,35 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon return errors -def populate_config_rules(device : Device, driver : _Driver) -> List[str]: - device_uuid = device.device_id.device_uuid.uuid - - resources_to_get = ['ALL'] - results_getconfig = driver.GetConfig() - +def _raw_config_rules_to_grpc( + device_uuid : str, device_config : DeviceConfig, error_template : str, default_config_action : ConfigActionEnum, + raw_config_rules : List[Tuple[str, Union[Any, Exception, None]]] +) -> List[str]: errors : List[str] = list() - for resource_key, resource_value in zip(resources_to_get, results_getconfig): + + for resource_key, resource_value in raw_config_rules: if isinstance(resource_value, Exception): - errors.append(ERROR_GET.format(device_uuid, str(resource_key), str(resource_value))) + errors.append(error_template.format(device_uuid, str(resource_key), str(resource_value))) continue - config_rule = device.device_config.config_rules.add() - config_rule.action = ConfigActionEnum.CONFIGACTION_SET + config_rule = device_config.config_rules.add() + config_rule.action = default_config_action config_rule.custom.resource_key = resource_key - config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) + config_rule.custom.resource_value = \ + resource_value if isinstance(resource_value, str) else json.dumps(resource_value, sort_keys=True) return errors +def populate_config_rules(device : Device, driver : _Driver) -> List[str]: + device_uuid = device.device_id.device_uuid.uuid + results_getconfig = driver.GetConfig() + return _raw_config_rules_to_grpc( + device_uuid, device.device_config, ERROR_GET, ConfigActionEnum.CONFIGACTION_SET, results_getconfig) + def populate_initial_config_rules(device_uuid : str, device_config : DeviceConfig, driver : _Driver) -> List[str]: results_getinitconfig = driver.GetInitialConfig() - - errors : List[str] = list() - for resource_key, resource_value in results_getinitconfig: - if isinstance(resource_value, Exception): - errors.append(ERROR_GET_INIT.format(device_uuid, str(resource_key), str(resource_value))) - continue - - config_rule = device_config.config_rules.add() - config_rule.action = ConfigActionEnum.CONFIGACTION_SET - config_rule.custom.resource_key = resource_key - config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) - - return errors + return _raw_config_rules_to_grpc( + device_uuid, device_config, ERROR_GET_INIT, ConfigActionEnum.CONFIGACTION_SET, results_getinitconfig) def compute_rules_to_add_delete( device : Device, request : Device @@ -186,37 +172,27 @@ def configure_rules(device : Device, driver : _Driver, resources_to_set : List[T device_uuid = device.device_id.device_uuid.uuid results_setconfig = driver.SetConfig(resources_to_set) + results_setconfig = [ + (resource_key, result if isinstance(result, Exception) else resource_value) + for (resource_key, resource_value), result in zip(resources_to_set, results_setconfig) + ] - errors : List[str] = list() - for (resource_key, resource_value), result in zip(resources_to_set, results_setconfig): - if isinstance(result, Exception): - errors.append(ERROR_SET.format(device_uuid, str(resource_key), str(resource_value), str(result))) - continue - # add to config of device - config_rule = device.device_config.config_rules.add() - config_rule.action = ConfigActionEnum.CONFIGACTION_SET - config_rule.custom.resource_key = resource_key - config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) - - return errors + device_config = DeviceConfig() # ignored; added at the end of ConfigureDevice + return _raw_config_rules_to_grpc( + device_uuid, device_config, ERROR_SET, ConfigActionEnum.CONFIGACTION_SET, results_setconfig) def deconfigure_rules(device : Device, driver : _Driver, resources_to_delete : List[Tuple[str, Any]]) -> List[str]: device_uuid = device.device_id.device_uuid.uuid results_deleteconfig = driver.DeleteConfig(resources_to_delete) + results_deleteconfig = [ + (resource_key, result if isinstance(result, Exception) else resource_value) + for (resource_key, resource_value), result in zip(resources_to_delete, results_deleteconfig) + ] - errors : List[str] = list() - for (resource_key, resource_value), result in zip(resources_to_delete, results_deleteconfig): - if isinstance(result, Exception): - errors.append(ERROR_DELETE.format(device_uuid, str(resource_key), str(resource_value), str(result))) - continue - # remove from config of device - config_rule = device.device_config.config_rules.add() - config_rule.action = ConfigActionEnum.CONFIGACTION_SET - config_rule.custom.resource_key = resource_key - config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) - - return errors + device_config = DeviceConfig() # ignored; added at the end of ConfigureDevice + return _raw_config_rules_to_grpc( + device_uuid, device_config, ERROR_DELETE, ConfigActionEnum.CONFIGACTION_DELETE, results_deleteconfig) def subscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]: kpi_uuid = request.kpi_id.kpi_id.uuid @@ -253,20 +229,11 @@ def subscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loo return errors def unsubscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]: - kpi_uuid = request.kpi_id.kpi_id.uuid - device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid - #endpoint_uuid = request.kpi_descriptor.endpoint_id.endpoint_uuid.uuid - #kpi_sample_type = request.kpi_descriptor.kpi_sample_type - - # TODO: consider if further validation needs to be done (correct endpoint_uuid?, correct kpi_sample_type?) - #resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_uuid, kpi_sample_type) - #if resource_key is None: - # kpi_sample_type_name = KpiSampleType.Name(kpi_sample_type).upper().replace('KPISAMPLETYPE_', '') - # return [ERROR_SAMPLETYPE.format(device_uuid, endpoint_uuid, str(kpi_sample_type), str(kpi_sample_type_name))] + kpi_uuid = request.kpi_id.kpi_id.uuid kpi_details = monitoring_loops.get_kpi_by_uuid(kpi_uuid) if kpi_details is None: - return [ERROR_MISSING_KPI.format(str(device_uuid), str(kpi_uuid))] + return [ERROR_MISSING_KPI.format(str(kpi_uuid))] device_uuid, resource_key, sampling_duration, sampling_interval = kpi_details diff --git a/src/device/tests/Device_Emulated.py b/src/device/tests/Device_Emulated.py index 7b8f15918..cf564b0bf 100644 --- a/src/device/tests/Device_Emulated.py +++ b/src/device/tests/Device_Emulated.py @@ -18,7 +18,7 @@ from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id) from device.tests.CommonObjects import PACKET_PORT_SAMPLE_TYPES -DEVICE_EMU_UUID = 'EMULATED' +DEVICE_EMU_UUID = 'R1-EMU' DEVICE_EMU_ID = json_device_id(DEVICE_EMU_UUID) DEVICE_EMU = json_device_emulated_packet_router_disabled(DEVICE_EMU_UUID) DEVICE_EMU_EP_UUIDS = ['EP1', 'EP2', 'EP3', 'EP4'] diff --git a/src/device/tests/test_unitary_emulated.py b/src/device/tests/test_unitary_emulated.py index 745c25c1e..8a1b30a6e 100644 --- a/src/device/tests/test_unitary_emulated.py +++ b/src/device/tests/test_unitary_emulated.py @@ -168,12 +168,14 @@ def test_device_emulated_configure( config_rule = ( ConfigActionEnum.Name(config_rule['action']), config_rule['custom']['resource_key'], json.loads(json.dumps(config_rule['custom']['resource_value']))) + #LOGGER.info('config_rule: {:s} {:s} = {:s}'.format(*config_rule)) assert config_rule in config_rules for config_rule in DEVICE_EMU_CONFIG_ADDRESSES: assert 'custom' in config_rule config_rule = ( ConfigActionEnum.Name(config_rule['action']), config_rule['custom']['resource_key'], json.loads(json.dumps(config_rule['custom']['resource_value']))) + #LOGGER.info('config_rule: {:s} {:s} = {:s}'.format(*config_rule)) assert config_rule in config_rules # Try to reconfigure... @@ -222,6 +224,7 @@ def test_device_emulated_configure( config_rule = ( ConfigActionEnum.Name(config_rule['action']), config_rule['custom']['resource_key'], config_rule['custom']['resource_value']) + #LOGGER.info('config_rule: {:s} {:s} = {:s}'.format(*config_rule)) assert config_rule in config_rules -- GitLab From a72e275d5b7402e1fc1b72bb5f19b220f0c770bb Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 16:57:55 +0000 Subject: [PATCH 115/158] Device component: - extended AddDevice to create a device placeholder and get the correct device UUID --- .../service/DeviceServiceServicerImpl.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 628b0884f..6674ef134 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -43,8 +43,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def AddDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId: - device_id = request.device_id - device_uuid = device_id.device_uuid.uuid + device_uuid = request.device_id.device_uuid.uuid connection_config_rules = check_connect_rules(request.device_config) check_no_endpoints(request.device_endpoints) @@ -52,9 +51,18 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): context_client = ContextClient() device = get_device(context_client, device_uuid, rw_copy=True) if device is None: - # not in context, create from request + # not in context, create blank one to get UUID, and populate it below device = Device() - device.CopyFrom(request) + device.device_id.CopyFrom(request.device_id) # pylint: disable=no-member + device.name = request.name + device.device_type = request.device_type + device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED + device.device_drivers.extend(request.device_drivers) # pylint: disable=no-member + device_id = context_client.SetDevice(device) + device = get_device(context_client, device_id.device_uuid.uuid, rw_copy=True) + + # update device_uuid to honor UUID provided by Context + device_uuid = device.device_id.device_uuid.uuid self.mutex_queues.wait_my_turn(device_uuid) try: @@ -70,6 +78,8 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): # created from request, populate config rules using driver errors.extend(populate_config_rules(device, driver)) + # TODO: populate components + if len(errors) > 0: for error in errors: LOGGER.error(error) raise OperationFailedException('AddDevice', extra_details=errors) -- GitLab From ae294326c44ca263dc87c7b7f191b0a5f95e069b Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 16:58:53 +0000 Subject: [PATCH 116/158] WebUI component: - improvements in details pages --- src/webui/service/templates/device/detail.html | 8 ++++---- src/webui/service/templates/link/detail.html | 8 +++----- src/webui/service/templates/service/detail.html | 10 +++++----- src/webui/service/templates/slice/detail.html | 10 +++++----- 4 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index adf503952..f21cbbcf0 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -43,9 +43,9 @@ <br> <div class="row mb-3"> <div class="col-sm-4"> - <b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br><br> - <b>Name: </b>{{ device.name }}<br><br> - <b>Type: </b>{{ device.device_type }}<br><br> + <b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br> + <b>Name: </b>{{ device.name }}<br> + <b>Type: </b>{{ device.device_type }}<br> <b>Status: </b> {{ dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br> <b>Drivers: </b> <ul> @@ -58,7 +58,7 @@ <table class="table table-striped table-hover"> <thead> <tr> - <th scope="col">Endpoint</th> + <th scope="col">Endpoint UUID</th> <th scope="col">Name</th> <th scope="col">Type</th> </tr> diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html index fc865a4b9..16ec5470c 100644 --- a/src/webui/service/templates/link/detail.html +++ b/src/webui/service/templates/link/detail.html @@ -29,16 +29,14 @@ <br> <div class="row mb-3"> <div class="col-sm-4"> - <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br><br> - </div> - <div class="col-sm-4"> - <b>Name: </b>{{ link.name }}<br><br> + <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br> + <b>Name: </b>{{ link.name }}<br> </div> <div class="col-sm-8"> <table class="table table-striped table-hover"> <thead> <tr> - <th scope="col">Endpoints</th> + <th scope="col">Endpoint UUID</th> <th scope="col">Device</th> </tr> </thead> diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index 9167f0016..67b240b3d 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -43,16 +43,16 @@ <div class="row mb-3"> <div class="col-sm-4"> - <b>Context: </b> {{ service.service_id.context_id.context_uuid.uuid }}<br><br> - <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br><br> - <b>Type: </b> {{ ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br><br> - <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br><br> + <b>Context: </b> {{ service.service_id.context_id.context_uuid.uuid }}<br> + <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br> + <b>Type: </b> {{ ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br> + <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br> </div> <div class="col-sm-8"> <table class="table table-striped table-hover"> <thead> <tr> - <th scope="col">Endpoints</th> + <th scope="col">Endpoint UUID</th> <th scope="col">Device</th> </tr> </thead> diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 9bd4eb0d9..404dede39 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -44,16 +44,16 @@ <div class="row mb-3"> <div class="col-sm-4"> - <b>Context: </b> {{ slice.slice_id.context_id.context_uuid.uuid }}<br><br> - <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br><br> - <b>Owner: </b> {{ slice.slice_owner.owner_uuid.uuid }}<br><br> - <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br><br> + <b>Context: </b> {{ slice.slice_id.context_id.context_uuid.uuid }}<br> + <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br> + <b>Owner: </b> {{ slice.slice_owner.owner_uuid.uuid }}<br> + <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br> </div> <div class="col-sm-8"> <table class="table table-striped table-hover"> <thead> <tr> - <th scope="col">Endpoints</th> + <th scope="col">Endpoint UUID</th> <th scope="col">Device</th> </tr> </thead> -- GitLab From 47eb992a6909001970a6f1be05875e920179bc96 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 17:06:13 +0000 Subject: [PATCH 117/158] Context component: - updated device_set and link_set to support placeholder creation --- src/context/service/database/Device.py | 34 ++++++++++++++------------ src/context/service/database/Link.py | 20 ++++++++------- 2 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py index e40c28e69..07d1c7606 100644 --- a/src/context/service/database/Device.py +++ b/src/context/service/database/Device.py @@ -136,23 +136,25 @@ def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]: created_at,updated_at = session.execute(stmt).fetchone() updated = updated_at > created_at - stmt = insert(EndPointModel).values(endpoints_data) - stmt = stmt.on_conflict_do_update( - index_elements=[EndPointModel.endpoint_uuid], - set_=dict( - name = stmt.excluded.name, - endpoint_type = stmt.excluded.endpoint_type, - kpi_sample_types = stmt.excluded.kpi_sample_types, - updated_at = stmt.excluded.updated_at, + if len(endpoints_data) > 0: + stmt = insert(EndPointModel).values(endpoints_data) + stmt = stmt.on_conflict_do_update( + index_elements=[EndPointModel.endpoint_uuid], + set_=dict( + name = stmt.excluded.name, + endpoint_type = stmt.excluded.endpoint_type, + kpi_sample_types = stmt.excluded.kpi_sample_types, + updated_at = stmt.excluded.updated_at, + ) ) - ) - stmt = stmt.returning(EndPointModel.created_at, EndPointModel.updated_at) - endpoint_updates = session.execute(stmt).fetchall() - updated = updated or any([(updated_at > created_at) for created_at,updated_at in endpoint_updates]) - - session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing( - index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] - )) + stmt = stmt.returning(EndPointModel.created_at, EndPointModel.updated_at) + endpoint_updates = session.execute(stmt).fetchall() + updated = updated or any([(updated_at > created_at) for created_at,updated_at in endpoint_updates]) + + if len(related_topologies) > 0: + session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing( + index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid] + )) configrule_updates = upsert_config_rules(session, config_rules, device_uuid=device_uuid) updated = updated or any([(updated_at > created_at) for created_at,updated_at in configrule_updates]) diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index 2621e73dc..2d9e80894 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -100,16 +100,18 @@ def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]: created_at,updated_at = session.execute(stmt).fetchone() updated = updated_at > created_at - # TODO: manage add/remove of endpoints; manage changes in relations with topology - stmt = insert(LinkEndPointModel).values(link_endpoints_data) - stmt = stmt.on_conflict_do_nothing( - index_elements=[LinkEndPointModel.link_uuid, LinkEndPointModel.endpoint_uuid] - ) - session.execute(stmt) + if len(link_endpoints_data) > 0: + # TODO: manage add/remove of endpoints; manage changes in relations with topology + stmt = insert(LinkEndPointModel).values(link_endpoints_data) + stmt = stmt.on_conflict_do_nothing( + index_elements=[LinkEndPointModel.link_uuid, LinkEndPointModel.endpoint_uuid] + ) + session.execute(stmt) - session.execute(insert(TopologyLinkModel).values(related_topologies).on_conflict_do_nothing( - index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid] - )) + if len(related_topologies) > 0: + session.execute(insert(TopologyLinkModel).values(related_topologies).on_conflict_do_nothing( + index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid] + )) return updated -- GitLab From 65d6544128ab343ad5743d7b991ec4cd7c353bec Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 17:19:54 +0000 Subject: [PATCH 118/158] Manifests: - activated debug for testing purposes - reduced number of replicas to 1 --- manifests/contextservice.yaml | 15 ++------------- manifests/deviceservice.yaml | 2 +- manifests/pathcompservice.yaml | 2 +- manifests/serviceservice.yaml | 4 ++-- manifests/sliceservice.yaml | 2 +- 5 files changed, 7 insertions(+), 18 deletions(-) diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 74955dc6f..f5844d81b 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -20,7 +20,7 @@ spec: selector: matchLabels: app: contextservice - replicas: 5 + replicas: 1 template: metadata: labels: @@ -28,17 +28,6 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - #- name: redis - # image: redis:6.2 - # ports: - # - containerPort: 6379 - # resources: - # requests: - # cpu: 100m - # memory: 128Mi - # limits: - # cpu: 500m - # memory: 1024Mi - name: server image: registry.gitlab.com/teraflow-h2020/controller/context:latest imagePullPolicy: Always @@ -53,7 +42,7 @@ spec: #- name: NATS_URI # value: "nats://tfs:tfs123@nats-public.nats.svc.cluster.local:4222" - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:1010"] diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 83daa41f3..960096b93 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:2020"] diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 71c927b56..e9b890e76 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -20,7 +20,7 @@ spec: selector: matchLabels: app: pathcompservice - replicas: 5 + replicas: 1 template: metadata: labels: diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 089be20f9..b24bf13f0 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -20,7 +20,7 @@ spec: selector: matchLabels: app: serviceservice - replicas: 5 + replicas: 1 template: metadata: labels: @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index ff4b41fe7..375344a97 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:4040"] -- GitLab From 9fe3c1a156b8b0e00dad774f7ebb6188c03cbaea Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 17:36:19 +0000 Subject: [PATCH 119/158] Device component: - added copy of connection config rules when creating device - added log messages for testing purposes --- src/device/service/DeviceServiceServicerImpl.py | 1 + src/device/service/Tools.py | 12 +++++++++++- src/device/service/driver_api/DriverInstanceCache.py | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 6674ef134..179b7795b 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -58,6 +58,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): device.device_type = request.device_type device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED device.device_drivers.extend(request.device_drivers) # pylint: disable=no-member + device.device_config.CopyFrom(request.device_config) # pylint: disable=no-member device_id = context_client.SetDevice(device) device = get_device(context_client, device_id.device_uuid.uuid, rw_copy=True) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index d2cd0b481..c86838c62 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json +import json, logging from typing import Any, Dict, List, Tuple, Union from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.method_wrappers.ServiceExceptions import InvalidArgumentException @@ -26,6 +26,8 @@ from .Errors import ( ERROR_BAD_ENDPOINT, ERROR_DELETE, ERROR_GET, ERROR_GET_INIT, ERROR_MISSING_KPI, ERROR_SAMPLETYPE, ERROR_SET, ERROR_SUBSCRIBE, ERROR_UNSUBSCRIBE) +LOGGER = logging.getLogger(__name__) + def check_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]: connection_config_rules = dict() unexpected_config_rules = list() @@ -77,10 +79,14 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon device_uuid = device.device_id.device_uuid.uuid resources_to_get = [RESOURCE_ENDPOINTS] + LOGGER.warning('resources_to_get = {:s}'.format(str(resources_to_get))) results_getconfig = driver.GetConfig(resources_to_get) + LOGGER.warning('results_getconfig = {:s}'.format(str(results_getconfig))) errors : List[str] = list() for endpoint in results_getconfig: + LOGGER.warning('endpoint = {:s}'.format(str(endpoint))) + if len(endpoint) != 2: errors.append(ERROR_BAD_ENDPOINT.format(device_uuid, str(endpoint))) continue @@ -90,6 +96,8 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon errors.append(ERROR_GET.format(device_uuid, str(resource_key), str(resource_value))) continue + LOGGER.warning('resource_value = {:s}'.format(str(resource_value))) + endpoint_uuid = resource_value.get('uuid') device_endpoint = device.device_endpoints.add() @@ -104,6 +112,8 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon device_endpoint.kpi_sample_types.append(kpi_sample_type) monitoring_loops.add_resource_key(device_uuid, endpoint_uuid, kpi_sample_type, monitor_resource_key) + LOGGER.warning('device.device_endpoints = {:s}'.format(str(device.device_endpoints))) + LOGGER.warning('errors = {:s}'.format(str(errors))) return errors def _raw_config_rules_to_grpc( diff --git a/src/device/service/driver_api/DriverInstanceCache.py b/src/device/service/driver_api/DriverInstanceCache.py index 29fecf36f..7bddd70aa 100644 --- a/src/device/service/driver_api/DriverInstanceCache.py +++ b/src/device/service/driver_api/DriverInstanceCache.py @@ -84,7 +84,7 @@ def get_driver(driver_instance_cache : DriverInstanceCache, device : Device) -> driver_filter_fields = get_device_driver_filter_fields(device) connect_rules = get_connect_rules(device.device_config) - #LOGGER.info('[get_driver] connect_rules = {:s}'.format(str(connect_rules))) + LOGGER.info('[get_driver] connect_rules = {:s}'.format(str(connect_rules))) address = connect_rules.get('address', '127.0.0.1') port = connect_rules.get('port', '0') settings = connect_rules.get('settings', '{}') -- GitLab From 9fa1fc44fd76a6f49971e5b9a2d825cdd192fa75 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 17:46:45 +0000 Subject: [PATCH 120/158] Context component: - added retrieve of action in config rules --- src/context/service/database/models/ConfigRuleModel.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py index c2305b001..fa8b9c2d2 100644 --- a/src/context/service/database/models/ConfigRuleModel.py +++ b/src/context/service/database/models/ConfigRuleModel.py @@ -46,4 +46,7 @@ class ConfigRuleModel(_Base): ) def dump(self) -> Dict: - return {self.kind.value: json.loads(self.data)} + return { + 'action': self.action.value, + self.kind.value: json.loads(self.data), + } -- GitLab From 641ddae8ddc69c8795929b241a3764dc0f35e631 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Tue, 24 Jan 2023 17:53:55 +0000 Subject: [PATCH 121/158] Device component: - removed log messages for testing purposes --- src/device/service/Tools.py | 12 +----------- src/device/service/driver_api/DriverInstanceCache.py | 2 +- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index c86838c62..d2cd0b481 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging +import json from typing import Any, Dict, List, Tuple, Union from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.method_wrappers.ServiceExceptions import InvalidArgumentException @@ -26,8 +26,6 @@ from .Errors import ( ERROR_BAD_ENDPOINT, ERROR_DELETE, ERROR_GET, ERROR_GET_INIT, ERROR_MISSING_KPI, ERROR_SAMPLETYPE, ERROR_SET, ERROR_SUBSCRIBE, ERROR_UNSUBSCRIBE) -LOGGER = logging.getLogger(__name__) - def check_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]: connection_config_rules = dict() unexpected_config_rules = list() @@ -79,14 +77,10 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon device_uuid = device.device_id.device_uuid.uuid resources_to_get = [RESOURCE_ENDPOINTS] - LOGGER.warning('resources_to_get = {:s}'.format(str(resources_to_get))) results_getconfig = driver.GetConfig(resources_to_get) - LOGGER.warning('results_getconfig = {:s}'.format(str(results_getconfig))) errors : List[str] = list() for endpoint in results_getconfig: - LOGGER.warning('endpoint = {:s}'.format(str(endpoint))) - if len(endpoint) != 2: errors.append(ERROR_BAD_ENDPOINT.format(device_uuid, str(endpoint))) continue @@ -96,8 +90,6 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon errors.append(ERROR_GET.format(device_uuid, str(resource_key), str(resource_value))) continue - LOGGER.warning('resource_value = {:s}'.format(str(resource_value))) - endpoint_uuid = resource_value.get('uuid') device_endpoint = device.device_endpoints.add() @@ -112,8 +104,6 @@ def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : Mon device_endpoint.kpi_sample_types.append(kpi_sample_type) monitoring_loops.add_resource_key(device_uuid, endpoint_uuid, kpi_sample_type, monitor_resource_key) - LOGGER.warning('device.device_endpoints = {:s}'.format(str(device.device_endpoints))) - LOGGER.warning('errors = {:s}'.format(str(errors))) return errors def _raw_config_rules_to_grpc( diff --git a/src/device/service/driver_api/DriverInstanceCache.py b/src/device/service/driver_api/DriverInstanceCache.py index 7bddd70aa..29fecf36f 100644 --- a/src/device/service/driver_api/DriverInstanceCache.py +++ b/src/device/service/driver_api/DriverInstanceCache.py @@ -84,7 +84,7 @@ def get_driver(driver_instance_cache : DriverInstanceCache, device : Device) -> driver_filter_fields = get_device_driver_filter_fields(device) connect_rules = get_connect_rules(device.device_config) - LOGGER.info('[get_driver] connect_rules = {:s}'.format(str(connect_rules))) + #LOGGER.info('[get_driver] connect_rules = {:s}'.format(str(connect_rules))) address = connect_rules.get('address', '127.0.0.1') port = connect_rules.get('port', '0') settings = connect_rules.get('settings', '{}') -- GitLab From 5e8130ad74cae0064e3419942a896bd754fe203c Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 12:42:53 +0000 Subject: [PATCH 122/158] All components: - migrated from DEFAULT_CONTEXT_UUID to DEFAULT_CONTEXT_NAME - migrated from DEFAULT_TOPOLOGY_UUID to DEFAULT_TOPOLOGY_NAME - migrated from INTERDOMAIN_TOPOLOGY_UUID to INTERDOMAIN_TOPOLOGY_NAME --- hackfest/p4/tests/Objects.py | 10 +++---- src/common/Constants.py | 5 +--- .../nbi_plugins/ietf_l2vpn/L2VPN_Services.py | 4 +-- src/compute/tests/test_debug_api.py | 28 +++++++++---------- .../event_dispatcher/DltEventDispatcher.py | 24 ++++++++-------- .../service/RemoteDomainClients.py | 6 ++-- src/interdomain/service/Tools.py | 8 +++--- .../topology_abstractor/AbstractDevice.py | 10 +++---- .../topology_abstractor/AbstractLink.py | 8 +++--- .../topology_abstractor/TopologyAbstractor.py | 18 ++++++------ .../tests/example_objects.py | 6 ++-- .../benchmark/automation/tests/Objects.py | 10 +++---- .../tests/test_functional_delete_service.py | 4 +-- src/tests/ecoc22/tests/Objects_BigNet.py | 10 +++---- src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py | 8 +++--- src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py | 8 +++--- .../ecoc22/tests/Objects_DC_CSGW_TN_OLS.py | 8 +++--- src/tests/oeccpsc22/tests/Objects_Domain_1.py | 10 +++---- src/tests/oeccpsc22/tests/Objects_Domain_2.py | 10 +++---- src/tests/ofc22/tests/ObjectsXr.py | 10 +++---- .../tests/test_functional_delete_service.py | 4 +-- src/tests/p4/tests/Objects.py | 10 +++---- 22 files changed, 108 insertions(+), 111 deletions(-) diff --git a/hackfest/p4/tests/Objects.py b/hackfest/p4/tests/Objects.py index c8b172244..dcef02552 100644 --- a/hackfest/p4/tests/Objects.py +++ b/hackfest/p4/tests/Objects.py @@ -14,7 +14,7 @@ import os from typing import Dict, List, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, @@ -28,12 +28,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id from common.proto.kpi_sample_types_pb2 import KpiSampleType # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) # ----- Monitoring Samples --------------------------------------------------------------------------------------------- PACKET_PORT_SAMPLE_TYPES = [ diff --git a/src/common/Constants.py b/src/common/Constants.py index 0c3afe43c..c26409f27 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging #, uuid +import logging from enum import Enum # Default logging level @@ -33,9 +33,6 @@ DEFAULT_METRICS_PORT = 9192 DEFAULT_CONTEXT_NAME = 'admin' DEFAULT_TOPOLOGY_NAME = 'admin' # contains the detailed local topology INTERDOMAIN_TOPOLOGY_NAME = 'inter' # contains the abstract inter-domain topology -#DEFAULT_CONTEXT_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_CONTEXT_NAME )) -#DEFAULT_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, DEFAULT_TOPOLOGY_NAME )) -#INTERDOMAIN_TOPOLOGY_UUID = str(uuid.uuid5(uuid.NAMESPACE_OID, INTERDOMAIN_TOPOLOGY_NAME)) # Default service names class ServiceNameEnum(Enum): diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py index 248b99896..d27e55047 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py @@ -18,7 +18,7 @@ from flask import request from flask.json import jsonify from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType -from common.Constants import DEFAULT_CONTEXT_UUID +from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import SliceStatusEnum, Slice from slice.client.SliceClient import SliceClient from .schemas.vpn_service import SCHEMA_VPN_SERVICE @@ -45,7 +45,7 @@ class L2VPN_Services(Resource): try: # pylint: disable=no-member slice_request = Slice() - slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID + slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME slice_request.slice_id.slice_uuid.uuid = vpn_service['vpn-id'] slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED diff --git a/src/compute/tests/test_debug_api.py b/src/compute/tests/test_debug_api.py index 31d204965..6265c3751 100644 --- a/src/compute/tests/test_debug_api.py +++ b/src/compute/tests/test_debug_api.py @@ -14,7 +14,7 @@ import logging, os, pytest, requests, time, urllib from typing import Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, ServiceNameEnum from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule from common.Settings import ( @@ -119,54 +119,54 @@ def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable validate_contexts(reply) def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) reply = do_rest_request('/context/{:s}'.format(context_uuid)) validate_context(reply) def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid)) validate_topology_ids(reply) def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid)) validate_topologies(reply) def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) - topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) + topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_NAME) reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid)) validate_topology(reply, num_devices=3, num_links=3) def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid)) validate_service_ids(reply) def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) reply = do_rest_request('/context/{:s}/services'.format(context_uuid)) validate_services(reply) def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='') reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) validate_service(reply) def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid)) #validate_slice_ids(reply) def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) reply = do_rest_request('/context/{:s}/slices'.format(context_uuid)) #validate_slices(reply) def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) slice_uuid = urllib.parse.quote(SLICE_R1_R3_UUID, safe='') reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid)) #validate_slice(reply) @@ -198,13 +198,13 @@ def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=red validate_link(reply) def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid)) validate_connection_ids(reply) def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name - context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME) service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='') reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid)) validate_connections(reply) diff --git a/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py index 8973ae621..c569d75c3 100644 --- a/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py +++ b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py @@ -14,7 +14,7 @@ import grpc, json, logging, threading from typing import Any, Dict, Set -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.proto.context_pb2 import ContextId, Device, EventTypeEnum, Link, Slice, TopologyId from common.proto.dlt_connector_pb2 import DltSliceId from common.proto.dlt_gateway_pb2 import DltRecordEvent, DltRecordOperationEnum, DltRecordTypeEnum @@ -35,7 +35,7 @@ LOGGER = logging.getLogger(__name__) GET_EVENT_TIMEOUT = 0.5 -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) class Clients: def __init__(self) -> None: @@ -66,9 +66,9 @@ class DltEventDispatcher(threading.Thread): def run(self) -> None: clients = Clients() - create_context(clients.context_client, DEFAULT_CONTEXT_UUID) - create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) - create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID) + create_context(clients.context_client, DEFAULT_CONTEXT_NAME) + create_topology(clients.context_client, DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME) + create_topology(clients.context_client, DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME) dlt_events_collector = DltEventsCollector(clients.dlt_gateway_client, log_events_received=True) dlt_events_collector.start() @@ -81,8 +81,8 @@ class DltEventDispatcher(threading.Thread): local_domain_uuids = { topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids } - local_domain_uuids.discard(DEFAULT_TOPOLOGY_UUID) - local_domain_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + local_domain_uuids.discard(DEFAULT_TOPOLOGY_NAME) + local_domain_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME) self.dispatch_event(clients, local_domain_uuids, event) @@ -118,13 +118,13 @@ class DltEventDispatcher(threading.Thread): LOGGER.info('[_dispatch_device] record={:s}'.format(grpc_message_to_json_string(record))) create_context(clients.context_client, domain_uuid) - create_topology(clients.context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID) + create_topology(clients.context_client, domain_uuid, DEFAULT_TOPOLOGY_NAME) device = Device(**json.loads(record.data_json)) clients.context_client.SetDevice(device) device_uuid = device.device_id.device_uuid.uuid # pylint: disable=no-member - add_device_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, device_uuid) + add_device_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME, device_uuid) domain_context_id = ContextId(**json_context_id(domain_uuid)) - add_device_to_topology(clients.context_client, domain_context_id, DEFAULT_TOPOLOGY_UUID, device_uuid) + add_device_to_topology(clients.context_client, domain_context_id, DEFAULT_TOPOLOGY_NAME, device_uuid) elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}: raise NotImplementedError('Delete Device') @@ -148,7 +148,7 @@ class DltEventDispatcher(threading.Thread): link = Link(**json.loads(record.data_json)) clients.context_client.SetLink(link) link_uuid = link.link_id.link_uuid.uuid # pylint: disable=no-member - add_link_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, link_uuid) + add_link_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME, link_uuid) elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}: raise NotImplementedError('Delete Link') @@ -165,7 +165,7 @@ class DltEventDispatcher(threading.Thread): context_uuid = slice_.slice_id.context_id.context_uuid.uuid owner_uuid = slice_.slice_owner.owner_uuid.uuid create_context(clients.context_client, context_uuid) - create_topology(clients.context_client, context_uuid, DEFAULT_TOPOLOGY_UUID) + create_topology(clients.context_client, context_uuid, DEFAULT_TOPOLOGY_NAME) if domain_uuid in local_domain_uuids: # it is for "me" diff --git a/src/interdomain/service/RemoteDomainClients.py b/src/interdomain/service/RemoteDomainClients.py index 0aaadfeff..6eb2a9c06 100644 --- a/src/interdomain/service/RemoteDomainClients.py +++ b/src/interdomain/service/RemoteDomainClients.py @@ -13,7 +13,7 @@ # limitations under the License. import logging, socket -from common.Constants import DEFAULT_CONTEXT_UUID, ServiceNameEnum +from common.Constants import DEFAULT_CONTEXT_NAME, ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc from common.proto.context_pb2 import TeraFlowController from interdomain.client.InterdomainClient import InterdomainClient @@ -25,7 +25,7 @@ class RemoteDomainClients: self.peer_domain = {} def add_peer( - self, domain_name : str, host : str, port : int, context_uuid : str = DEFAULT_CONTEXT_UUID + self, domain_name : str, host : str, port : int, context_uuid : str = DEFAULT_CONTEXT_NAME ) -> None: while True: try: @@ -36,7 +36,7 @@ class RemoteDomainClients: interdomain_client = InterdomainClient(host=host, port=port) request = TeraFlowController() - request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID # pylint: disable=no-member + request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME # pylint: disable=no-member request.ip_address = get_service_host(ServiceNameEnum.INTERDOMAIN) request.port = int(get_service_port_grpc(ServiceNameEnum.INTERDOMAIN)) diff --git a/src/interdomain/service/Tools.py b/src/interdomain/service/Tools.py index fb6371603..472132adb 100644 --- a/src/interdomain/service/Tools.py +++ b/src/interdomain/service/Tools.py @@ -14,7 +14,7 @@ import json, logging from typing import List, Optional, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.proto.context_pb2 import ( ConfigRule, Constraint, ContextId, Device, Empty, EndPointId, Slice, SliceStatusEnum) from common.tools.context_queries.CheckType import device_type_is_network, endpoint_type_is_border @@ -32,12 +32,12 @@ def compute_slice_owner( ) -> Optional[str]: traversed_domain_uuids = {traversed_domain[0] for traversed_domain in traversed_domains} - existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) existing_topology_uuids = { topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids } - existing_topology_uuids.discard(DEFAULT_TOPOLOGY_UUID) - existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + existing_topology_uuids.discard(DEFAULT_TOPOLOGY_NAME) + existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME) candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids) if len(candidate_owner_uuids) != 1: diff --git a/src/interdomain/service/topology_abstractor/AbstractDevice.py b/src/interdomain/service/topology_abstractor/AbstractDevice.py index 3448c1036..4bb9683b0 100644 --- a/src/interdomain/service/topology_abstractor/AbstractDevice.py +++ b/src/interdomain/service/topology_abstractor/AbstractDevice.py @@ -14,7 +14,7 @@ import copy, logging from typing import Dict, Optional -from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, EndPoint) @@ -67,9 +67,9 @@ class AbstractDevice: is_datacenter = device_type_is_datacenter(self.__device_type) is_network = device_type_is_network(self.__device_type) if is_datacenter or is_network: - # Add abstract device to topologies [INTERDOMAIN_TOPOLOGY_UUID] - context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) - topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID] + # Add abstract device to topologies [INTERDOMAIN_TOPOLOGY_NAME] + context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + topology_uuids = [INTERDOMAIN_TOPOLOGY_NAME] for topology_uuid in topology_uuids: add_device_to_topology(self.__context_client, context_id, topology_uuid, self.__device_uuid) @@ -80,7 +80,7 @@ class AbstractDevice: # self.update_endpoints(dc_device) #elif is_network: # devices_in_admin_topology = get_devices_in_topology( - # self.__context_client, context_id, DEFAULT_TOPOLOGY_UUID) + # self.__context_client, context_id, DEFAULT_TOPOLOGY_NAME) # for device in devices_in_admin_topology: # if device_type_is_datacenter(device.device_type): continue # self.update_endpoints(device) diff --git a/src/interdomain/service/topology_abstractor/AbstractLink.py b/src/interdomain/service/topology_abstractor/AbstractLink.py index 7fe7b07b0..552d40d41 100644 --- a/src/interdomain/service/topology_abstractor/AbstractLink.py +++ b/src/interdomain/service/topology_abstractor/AbstractLink.py @@ -14,7 +14,7 @@ import copy, logging from typing import Dict, List, Optional, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.proto.context_pb2 import ContextId, EndPointId, Link, LinkId from common.tools.context_queries.Link import add_link_to_topology, get_existing_link_uuids from common.tools.object_factory.Context import json_context_id @@ -67,9 +67,9 @@ class AbstractLink: else: self._load_existing() - # Add abstract link to topologies [INTERDOMAIN_TOPOLOGY_UUID] - context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) - topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID] + # Add abstract link to topologies [INTERDOMAIN_TOPOLOGY_NAME] + context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + topology_uuids = [INTERDOMAIN_TOPOLOGY_NAME] for topology_uuid in topology_uuids: add_link_to_topology(self.__context_client, context_id, topology_uuid, self.__link_uuid) diff --git a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py index 5729fe733..db104144e 100644 --- a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py +++ b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py @@ -14,7 +14,7 @@ import logging, threading from typing import Dict, Optional, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPoint, EndPointId, Link, LinkEvent, TopologyId, @@ -39,8 +39,8 @@ from .Types import EventTypes LOGGER = logging.getLogger(__name__) -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) -INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID)) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) +INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id=ADMIN_CONTEXT_ID)) class TopologyAbstractor(threading.Thread): def __init__(self) -> None: @@ -65,8 +65,8 @@ class TopologyAbstractor(threading.Thread): def run(self) -> None: self.context_client.connect() - create_context(self.context_client, DEFAULT_CONTEXT_UUID) - topology_uuids = [DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID] + create_context(self.context_client, DEFAULT_CONTEXT_NAME) + topology_uuids = [DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME] create_missing_topologies(self.context_client, ADMIN_CONTEXT_ID, topology_uuids) self.dlt_connector_client.connect() @@ -96,7 +96,7 @@ class TopologyAbstractor(threading.Thread): # context_uuid = event.topology_id.context_id.context_uuid.uuid # if context_uuid != own_context_uuid: return True # topology_uuid = event.topology_id.topology_uuid.uuid - # if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: return True + # if topology_uuid in {INTERDOMAIN_TOPOLOGY_NAME}: return True # # return False @@ -200,7 +200,7 @@ class TopologyAbstractor(threading.Thread): device_uuid = device.device_id.device_uuid.uuid interdomain_device_uuids = get_uuids_of_devices_in_topology( - self.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + self.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) for endpoint in device.device_endpoints: if not endpoint_type_is_border(endpoint.endpoint_type): continue @@ -236,8 +236,8 @@ class TopologyAbstractor(threading.Thread): topology_uuid = topology_id.topology_uuid.uuid context_id = topology_id.context_id context_uuid = context_id.context_uuid.uuid - topology_uuids = {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID} - if (context_uuid == DEFAULT_CONTEXT_UUID) and (topology_uuid not in topology_uuids): + topology_uuids = {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME} + if (context_uuid == DEFAULT_CONTEXT_NAME) and (topology_uuid not in topology_uuids): abstract_topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=ADMIN_CONTEXT_ID)) self._get_or_create_abstract_device( topology_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id) diff --git a/src/opticalcentralizedattackdetector/tests/example_objects.py b/src/opticalcentralizedattackdetector/tests/example_objects.py index 3c5a26b6d..09320f1c3 100644 --- a/src/opticalcentralizedattackdetector/tests/example_objects.py +++ b/src/opticalcentralizedattackdetector/tests/example_objects.py @@ -13,7 +13,7 @@ # limitations under the License. from copy import deepcopy -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from context.proto.context_pb2 import ( ConfigActionEnum, DeviceDriverEnum, DeviceOperationalStatusEnum, ServiceStatusEnum, ServiceTypeEnum) @@ -31,7 +31,7 @@ def endpoint(topology_id, device_id, endpoint_uuid, endpoint_type): return {'endpoint_id': endpoint_id(topology_id, device_id, endpoint_uuid), 'endpoint_type': endpoint_type} ## use "deepcopy" to prevent propagating forced changes during tests -CONTEXT_ID = {'context_uuid': {'uuid': DEFAULT_CONTEXT_UUID}} +CONTEXT_ID = {'context_uuid': {'uuid': DEFAULT_CONTEXT_NAME}} CONTEXT = { 'context_id': deepcopy(CONTEXT_ID), 'topology_ids': [], @@ -47,7 +47,7 @@ CONTEXT_2 = { TOPOLOGY_ID = { 'context_id': deepcopy(CONTEXT_ID), - 'topology_uuid': {'uuid': DEFAULT_TOPOLOGY_UUID}, + 'topology_uuid': {'uuid': DEFAULT_TOPOLOGY_NAME}, } TOPOLOGY = { 'topology_id': deepcopy(TOPOLOGY_ID), diff --git a/src/tests/benchmark/automation/tests/Objects.py b/src/tests/benchmark/automation/tests/Objects.py index 8ea6f5008..1e8072f8f 100644 --- a/src/tests/benchmark/automation/tests/Objects.py +++ b/src/tests/benchmark/automation/tests/Objects.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Dict, List, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, @@ -24,12 +24,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id from common.proto.kpi_sample_types_pb2 import KpiSampleType # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) # ----- Monitoring Samples --------------------------------------------------------------------------------------------- PACKET_PORT_SAMPLE_TYPES = [ diff --git a/src/tests/benchmark/policy/tests/test_functional_delete_service.py b/src/tests/benchmark/policy/tests/test_functional_delete_service.py index 0f8d08801..48c2a0d5a 100644 --- a/src/tests/benchmark/policy/tests/test_functional_delete_service.py +++ b/src/tests/benchmark/policy/tests/test_functional_delete_service.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from common.Constants import DEFAULT_CONTEXT_UUID +from common.Constants import DEFAULT_CONTEXT_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader @@ -55,7 +55,7 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p assert len(response.links) == descriptor_loader.num_links l3nm_service_uuids = set() - response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) for service in response.services: service_id = service.service_id diff --git a/src/tests/ecoc22/tests/Objects_BigNet.py b/src/tests/ecoc22/tests/Objects_BigNet.py index 592376ff9..b9e70517c 100644 --- a/src/tests/ecoc22/tests/Objects_BigNet.py +++ b/src/tests/ecoc22/tests/Objects_BigNet.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, @@ -21,13 +21,13 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id from .Tools import compose_bearer, compose_service_endpoint_id, json_endpoint_ids, link # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) # ----- Customer Equipment (CE) Devices -------------------------------------------------------------------------------- diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py index 94d205a64..37ceeae6a 100644 --- a/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py @@ -13,7 +13,7 @@ # limitations under the License. import os, uuid -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, @@ -68,12 +68,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]): return service # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Domains -------------------------------------------------------------------------------------------------------- # Overall network topology -TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py index 229e3d5fe..f29999d6c 100644 --- a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py @@ -13,7 +13,7 @@ # limitations under the License. import os -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, @@ -59,12 +59,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]): return service # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Domains -------------------------------------------------------------------------------------------------------- # Overall network topology -TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py index 7063265f4..d6a0dad6d 100644 --- a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py @@ -13,7 +13,7 @@ # limitations under the License. import os, uuid -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, @@ -68,12 +68,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]): return service # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Domains -------------------------------------------------------------------------------------------------------- # Overall network topology -TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) diff --git a/src/tests/oeccpsc22/tests/Objects_Domain_1.py b/src/tests/oeccpsc22/tests/Objects_Domain_1.py index 8b26348c9..3f0f680df 100644 --- a/src/tests/oeccpsc22/tests/Objects_Domain_1.py +++ b/src/tests/oeccpsc22/tests/Objects_Domain_1.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id) @@ -21,12 +21,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id from .Tools import get_link_uuid, json_endpoint_ids # ----- Context -------------------------------------------------------------------------------------------------------- -D1_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -D1_CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +D1_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +D1_CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -D1_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=D1_CONTEXT_ID) -D1_TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=D1_CONTEXT_ID) +D1_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=D1_CONTEXT_ID) +D1_TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=D1_CONTEXT_ID) # ----- Devices -------------------------------------------------------------------------------------------------------- # Assume all devices have the same architecture of endpoints diff --git a/src/tests/oeccpsc22/tests/Objects_Domain_2.py b/src/tests/oeccpsc22/tests/Objects_Domain_2.py index f91338092..e8a537253 100644 --- a/src/tests/oeccpsc22/tests/Objects_Domain_2.py +++ b/src/tests/oeccpsc22/tests/Objects_Domain_2.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id) @@ -21,12 +21,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id from .Tools import get_link_uuid, json_endpoint_ids # ----- Context -------------------------------------------------------------------------------------------------------- -D2_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -D2_CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +D2_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +D2_CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -D2_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=D2_CONTEXT_ID) -D2_TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=D2_CONTEXT_ID) +D2_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=D2_CONTEXT_ID) +D2_TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=D2_CONTEXT_ID) # ----- Devices -------------------------------------------------------------------------------------------------------- # Assume all devices have the same architecture of endpoints diff --git a/src/tests/ofc22/tests/ObjectsXr.py b/src/tests/ofc22/tests/ObjectsXr.py index 0cb223de2..f743e7a81 100644 --- a/src/tests/ofc22/tests/ObjectsXr.py +++ b/src/tests/ofc22/tests/ObjectsXr.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Dict, List, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, @@ -24,12 +24,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id from common.proto.kpi_sample_types_pb2 import KpiSampleType # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) # ----- Monitoring Samples --------------------------------------------------------------------------------------------- PACKET_PORT_SAMPLE_TYPES = [ diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py index 0f8d08801..48c2a0d5a 100644 --- a/src/tests/ofc22/tests/test_functional_delete_service.py +++ b/src/tests/ofc22/tests/test_functional_delete_service.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from common.Constants import DEFAULT_CONTEXT_UUID +from common.Constants import DEFAULT_CONTEXT_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum from common.tools.descriptor.Loader import DescriptorLoader @@ -55,7 +55,7 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p assert len(response.links) == descriptor_loader.num_links l3nm_service_uuids = set() - response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) for service in response.services: service_id = service.service_id diff --git a/src/tests/p4/tests/Objects.py b/src/tests/p4/tests/Objects.py index 0473207a8..544fe35ee 100644 --- a/src/tests/p4/tests/Objects.py +++ b/src/tests/p4/tests/Objects.py @@ -14,7 +14,7 @@ import os from typing import Dict, List, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, @@ -30,12 +30,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id from common.proto.kpi_sample_types_pb2 import KpiSampleType # ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +CONTEXT = json_context(DEFAULT_CONTEXT_NAME) # ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID) # ----- Monitoring Samples --------------------------------------------------------------------------------------------- PACKET_PORT_SAMPLE_TYPES = [ -- GitLab From 83d1d42ae0b8ec881948bb1846bda7c8773c4bc9 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 12:43:17 +0000 Subject: [PATCH 123/158] Service component: - added pathcomp to CI/CD unitary tests --- src/service/.gitlab-ci.yml | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index c40bc90cf..98d709896 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -46,20 +46,40 @@ unit test service: stage: unit_test needs: - build service + - build pathcomp before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi + - if docker container ls | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend image is not in the system"; fi + - if docker container ls | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend image is not in the system"; fi script: - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker pull "$CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG" + - docker pull "$CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG" + - docker run --name pathcomp-backend -d -p 8081:8081 --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG + - sleep 1 + - docker run --name pathcomp-frontend -d -p 10020:10020 --env "PATHCOMP_BACKEND_HOST=172.28.0.1" --env "PATHCOMP_BACKEND_PORT=8081" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG + - sleep 1 - docker run --name $IMAGE_NAME -d -p 3030:3030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - sleep 5 - docker ps -a + - docker logs pathcomp-frontend + - docker logs pathcomp-backend - docker logs $IMAGE_NAME - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml" - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + - docker logs pathcomp-frontend + - docker logs pathcomp-backend + - docker logs $IMAGE_NAME coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: + - docker ps -a + - docker logs pathcomp-frontend + - docker logs pathcomp-backend + - docker logs ${IMAGE_NAME} + - docker rm -f pathcomp-frontend + - docker rm -f pathcomp-backend - docker rm -f $IMAGE_NAME - docker network rm teraflowbridge rules: @@ -72,7 +92,15 @@ unit test service: - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - src/$IMAGE_NAME/tests/Dockerfile + - src/pathcomp/.gitlab-ci.yml + - src/pathcomp/frontend/**/*.{py,in,yml} + - src/pathcomp/frontend/Dockerfile + - src/pathcomp/frontend/tests/*.py + - src/pathcomp/backend/**/*.{c,h,conf} + - src/pathcomp/backend/Makefile + - src/pathcomp/backend/Dockerfile - manifests/${IMAGE_NAME}service.yaml + - manifests/pathcompservice.yaml - .gitlab-ci.yml artifacts: when: always -- GitLab From 6ead3e7410dc97896d213c473518fc91459e73ce Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 13:50:51 +0000 Subject: [PATCH 124/158] CI/CD pipeline - updated pipeline to meet requirements and dependencies --- .gitlab-ci.yml | 19 ++++++----- src/automation/.gitlab-ci.yml | 38 +++++++++++----------- src/compute/.gitlab-ci.yml | 54 +++++++++++++++---------------- src/context/.gitlab-ci.yml | 54 +++++++++++++++---------------- src/device/.gitlab-ci.yml | 54 +++++++++++++++---------------- src/monitoring/.gitlab-ci.yml | 54 +++++++++++++++---------------- src/pathcomp/.gitlab-ci.yml | 61 ++++++++++++++++++----------------- src/policy/.gitlab-ci.yml | 38 +++++++++++----------- src/service/.gitlab-ci.yml | 9 ++++-- src/slice/.gitlab-ci.yml | 55 +++++++++++++++---------------- 10 files changed, 222 insertions(+), 214 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 316a38f23..dbc43f278 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,11 +16,14 @@ stages: #- dependencies - build + - build - test - - unit_test - - integ_test - - deploy - - funct_test + - unit_test_stage1 + - unit_test_stage2 + - unit_test_stage3 + - unit_test_stage4 + #- deploy + #- end2end_test # include the individual .gitlab-ci.yml of each micro-service include: @@ -30,12 +33,12 @@ include: - local: '/src/context/.gitlab-ci.yml' - local: '/src/device/.gitlab-ci.yml' - local: '/src/service/.gitlab-ci.yml' - - local: '/src/dbscanserving/.gitlab-ci.yml' - - local: '/src/opticalattackmitigator/.gitlab-ci.yml' - - local: '/src/opticalcentralizedattackdetector/.gitlab-ci.yml' + #- local: '/src/dbscanserving/.gitlab-ci.yml' + #- local: '/src/opticalattackmitigator/.gitlab-ci.yml' + #- local: '/src/opticalcentralizedattackdetector/.gitlab-ci.yml' - local: '/src/automation/.gitlab-ci.yml' - local: '/src/policy/.gitlab-ci.yml' - - local: '/src/webui/.gitlab-ci.yml' + #- local: '/src/webui/.gitlab-ci.yml' #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' diff --git a/src/automation/.gitlab-ci.yml b/src/automation/.gitlab-ci.yml index 87d141d5b..610cf8eec 100644 --- a/src/automation/.gitlab-ci.yml +++ b/src/automation/.gitlab-ci.yml @@ -42,7 +42,7 @@ build automation: unit_test automation: variables: REPORTS_CONTAINER: "${IMAGE_NAME}-reports" - stage: unit_test + stage: unit_test_stage1 needs: - build automation before_script: @@ -79,22 +79,22 @@ unit_test automation: - manifests/${IMAGE_NAME}service.yaml - .gitlab-ci.yml -# Deployment of automation service in Kubernetes Cluster -deploy automation: - stage: deploy - needs: - - build automation - - unit_test automation - script: - - kubectl version - - kubectl get all - - kubectl delete --ignore-not-found=true -f "manifests/automationservice.yaml" - - kubectl apply -f "manifests/automationservice.yaml" - - kubectl delete pods --selector app=automationservice - - kubectl get all - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of automation service in Kubernetes Cluster +#deploy automation: +# stage: deploy +# needs: +# - build automation +# - unit_test automation +# script: +# - kubectl version +# - kubectl get all +# - kubectl delete --ignore-not-found=true -f "manifests/automationservice.yaml" +# - kubectl apply -f "manifests/automationservice.yaml" +# - kubectl delete pods --selector app=automationservice +# - kubectl get all +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/compute/.gitlab-ci.yml b/src/compute/.gitlab-ci.yml index 52b36e819..fdf8af236 100644 --- a/src/compute/.gitlab-ci.yml +++ b/src/compute/.gitlab-ci.yml @@ -39,11 +39,11 @@ build compute: - .gitlab-ci.yml # Apply unit test to the component -unit test compute: +unit_test compute: variables: IMAGE_NAME: 'compute' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage1 needs: - build compute before_script: @@ -79,28 +79,28 @@ unit test compute: reports: junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml -# Deployment of the service in Kubernetes Cluster -deploy compute: - variables: - IMAGE_NAME: 'compute' # name of the microservice - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: deploy - needs: - - unit test compute - # - integ_test execute - script: - - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" - - kubectl get all - # environment: - # name: test - # url: https://example.com - # kubernetes: - # namespace: test - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of the service in Kubernetes Cluster +#deploy compute: +# variables: +# IMAGE_NAME: 'compute' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test compute +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 29b5fb9db..7d2a95e7c 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -39,11 +39,11 @@ build context: - .gitlab-ci.yml # Apply unit test to the component -unit test context: +unit_test context: variables: IMAGE_NAME: 'context' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage1 needs: - build context before_script: @@ -115,28 +115,28 @@ unit test context: reports: junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml -# Deployment of the service in Kubernetes Cluster -deploy context: - variables: - IMAGE_NAME: 'context' # name of the microservice - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: deploy - needs: - - unit test context - # - integ_test execute - script: - - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" - - kubectl get all - # environment: - # name: test - # url: https://example.com - # kubernetes: - # namespace: test - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of the service in Kubernetes Cluster +#deploy context: +# variables: +# IMAGE_NAME: 'context' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test context +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/device/.gitlab-ci.yml b/src/device/.gitlab-ci.yml index 3da19e7a3..cb447e2e5 100644 --- a/src/device/.gitlab-ci.yml +++ b/src/device/.gitlab-ci.yml @@ -39,11 +39,11 @@ build device: - .gitlab-ci.yml # Apply unit test to the component -unit test device: +unit_test device: variables: IMAGE_NAME: 'device' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage1 needs: - build device before_script: @@ -79,28 +79,28 @@ unit test device: reports: junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml -# Deployment of the service in Kubernetes Cluster -deploy device: - variables: - IMAGE_NAME: 'device' # name of the microservice - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: deploy - needs: - - unit test device - # - integ_test execute - script: - - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" - - kubectl get all - # environment: - # name: test - # url: https://example.com - # kubernetes: - # namespace: test - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of the service in Kubernetes Cluster +#deploy device: +# variables: +# IMAGE_NAME: 'device' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test device +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/monitoring/.gitlab-ci.yml b/src/monitoring/.gitlab-ci.yml index ef3a8c39a..1dfefa1ee 100644 --- a/src/monitoring/.gitlab-ci.yml +++ b/src/monitoring/.gitlab-ci.yml @@ -39,11 +39,11 @@ build monitoring: - .gitlab-ci.yml # Apply unit test to the component -unit test monitoring: +unit_test monitoring: variables: IMAGE_NAME: 'monitoring' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage1 needs: - build monitoring before_script: @@ -84,28 +84,28 @@ unit test monitoring: reports: junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml -# Deployment of the service in Kubernetes Cluster -deploy monitoring: - variables: - IMAGE_NAME: 'monitoring' # name of the microservice - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: deploy - needs: - - unit test monitoring - # - integ_test execute - script: - - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" - - kubectl get all - # environment: - # name: test - # url: https://example.com - # kubernetes: - # namespace: test - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of the service in Kubernetes Cluster +#deploy monitoring: +# variables: +# IMAGE_NAME: 'monitoring' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test monitoring +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/pathcomp/.gitlab-ci.yml b/src/pathcomp/.gitlab-ci.yml index a45e735e4..5749a94fe 100644 --- a/src/pathcomp/.gitlab-ci.yml +++ b/src/pathcomp/.gitlab-ci.yml @@ -48,11 +48,11 @@ build pathcomp: - .gitlab-ci.yml # Apply unit test to the component -unit test pathcomp-backend: +unit_test pathcomp-backend: variables: IMAGE_NAME: 'pathcomp' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage1 needs: - build pathcomp before_script: @@ -93,13 +93,14 @@ unit test pathcomp-backend: # junit: src/$IMAGE_NAME/backend/tests/${IMAGE_NAME}-backend_report.xml # Apply unit test to the component -unit test pathcomp-frontend: +unit_test pathcomp-frontend: variables: IMAGE_NAME: 'pathcomp' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage2 needs: - build pathcomp + - unit_test pathcomp-backend before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi @@ -147,29 +148,29 @@ unit test pathcomp-frontend: reports: junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml -# Deployment of the service in Kubernetes Cluster -deploy pathcomp: - variables: - IMAGE_NAME: 'pathcomp' # name of the microservice - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: deploy - needs: - - unit test pathcomp-backend - - unit test pathcomp-frontend - # - integ_test execute - script: - - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" - - kubectl get all - # environment: - # name: test - # url: https://example.com - # kubernetes: - # namespace: test - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of the service in Kubernetes Cluster +#deploy pathcomp: +# variables: +# IMAGE_NAME: 'pathcomp' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test pathcomp-backend +# - unit test pathcomp-frontend +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual diff --git a/src/policy/.gitlab-ci.yml b/src/policy/.gitlab-ci.yml index 164540a05..d19da81d6 100644 --- a/src/policy/.gitlab-ci.yml +++ b/src/policy/.gitlab-ci.yml @@ -42,7 +42,7 @@ build policy: unit_test policy: variables: REPORTS_CONTAINER: "${IMAGE_NAME_POLICY}-reports" - stage: unit_test + stage: unit_test_stage1 needs: - build policy before_script: @@ -79,21 +79,21 @@ unit_test policy: - manifests/${IMAGE_NAME_POLICY}service.yaml - .gitlab-ci.yml -# Deployment of policy service in Kubernetes Cluster -deploy policy: - stage: deploy - needs: - - build policy - - unit_test policy - script: - - kubectl version - - kubectl get all - - kubectl delete --ignore-not-found=true -f "manifests/policyservice.yaml" - - kubectl apply -f "manifests/policyservice.yaml" - - kubectl delete pods --selector app=policyservice - - kubectl get all - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual \ No newline at end of file +## Deployment of policy service in Kubernetes Cluster +#deploy policy: +# stage: deploy +# needs: +# - build policy +# - unit_test policy +# script: +# - kubectl version +# - kubectl get all +# - kubectl delete --ignore-not-found=true -f "manifests/policyservice.yaml" +# - kubectl apply -f "manifests/policyservice.yaml" +# - kubectl delete pods --selector app=policyservice +# - kubectl get all +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual \ No newline at end of file diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index 98d709896..b363c1881 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -39,14 +39,17 @@ build service: - .gitlab-ci.yml # Apply unit test to the component -unit test service: +unit_test service: variables: IMAGE_NAME: 'service' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage3 needs: - build service - - build pathcomp + - unit_test context + - unit_test device + - unit_test pathcomp-backend + - unit_test pathcomp-frontend before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi diff --git a/src/slice/.gitlab-ci.yml b/src/slice/.gitlab-ci.yml index 9393e6b29..5fbff7029 100644 --- a/src/slice/.gitlab-ci.yml +++ b/src/slice/.gitlab-ci.yml @@ -39,13 +39,14 @@ build slice: - .gitlab-ci.yml # Apply unit test to the component -unit test slice: +unit_test slice: variables: IMAGE_NAME: 'slice' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test + stage: unit_test_stage4 needs: - build slice + - unit_test service before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi @@ -79,28 +80,28 @@ unit test slice: reports: junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml -# Deployment of the service in Kubernetes Cluster -deploy slice: - variables: - IMAGE_NAME: 'slice' # name of the microservice - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: deploy - needs: - - unit test slice - # - integ_test execute - script: - - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" - - kubectl get all - # environment: - # name: test - # url: https://example.com - # kubernetes: - # namespace: test - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of the service in Kubernetes Cluster +#deploy slice: +# variables: +# IMAGE_NAME: 'slice' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test slice +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual -- GitLab From 081d7e3157048291bff9d46222d24990acc9c98f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 13:52:00 +0000 Subject: [PATCH 125/158] CI/CD pipeline - updated pipeline to meet requirements and dependencies --- src/service/.gitlab-ci.yml | 50 +++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index b363c1881..6806f4e45 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -110,28 +110,28 @@ unit_test service: reports: junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml -# Deployment of the service in Kubernetes Cluster -deploy service: - variables: - IMAGE_NAME: 'service' # name of the microservice - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: deploy - needs: - - unit test service - # - integ_test execute - script: - - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' - - kubectl version - - kubectl get all - - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" - - kubectl get all - # environment: - # name: test - # url: https://example.com - # kubernetes: - # namespace: test - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - when: manual - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - when: manual +## Deployment of the service in Kubernetes Cluster +#deploy service: +# variables: +# IMAGE_NAME: 'service' # name of the microservice +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: deploy +# needs: +# - unit test service +# # - integ_test execute +# script: +# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' +# - kubectl version +# - kubectl get all +# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" +# - kubectl get all +# # environment: +# # name: test +# # url: https://example.com +# # kubernetes: +# # namespace: test +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# when: manual +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# when: manual -- GitLab From bbe67090fcb5914d474d6e59f64cf0e3b8faa69c Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 14:17:25 +0000 Subject: [PATCH 126/158] CI/CD pipeline - corrected dependencies in service component --- src/service/.gitlab-ci.yml | 130 +++++++++++++++++++++++++++++++------ 1 file changed, 110 insertions(+), 20 deletions(-) diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index 6806f4e45..e2feae0cc 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -52,39 +52,137 @@ unit_test service: - unit_test pathcomp-frontend before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi - - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi + + # Context-related + - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi + - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi + - if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi + + # Device-related + - if docker container ls | grep context; then docker rm -f context; else echo "context image is not in the system"; fi + - if docker container ls | grep device; then docker rm -f device; else echo "device image is not in the system"; fi + + # Pathcomp-related - if docker container ls | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend image is not in the system"; fi - if docker container ls | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend image is not in the system"; fi + + # Service-related + - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi + script: - - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker pull "cockroachdb/cockroach:latest-v22.2" + - docker pull "nats:2.9" + - docker pull "$CI_REGISTRY_IMAGE/context:$IMAGE_TAG" + - docker pull "$CI_REGISTRY_IMAGE/device:$IMAGE_TAG" - docker pull "$CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG" - docker pull "$CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG" - - docker run --name pathcomp-backend -d -p 8081:8081 --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG + - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + + # Context preparation + - docker volume create crdb + - > + docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 + --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 + --volume "crdb:/cockroach/cockroach-data" + cockroachdb/cockroach:latest-v22.2 start-single-node + - > + docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 + nats:2.9 --http_port 8222 --user tfs --pass tfs123 + - echo "Waiting for initialization..." + - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done + - docker logs crdb + - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done + - docker logs nats + - docker ps -a + - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $CRDB_ADDRESS + - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $NATS_ADDRESS + - > + docker run --name context -d -p 1010:1010 + --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" + --env "MB_BACKEND=nats" + --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/context:$IMAGE_TAG + - CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $CONTEXTSERVICE_SERVICE_HOST + + # Device preparation + - > + docker run --name device -d -p 2020:2020 + --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/device:$IMAGE_TAG + - DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $DEVICESERVICE_SERVICE_HOST + + # PathComp preparation + - > + docker run --name pathcomp-backend -d -p 8081:8081 + --network=teraflowbridge + $CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG + - PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $PATHCOMP_BACKEND_HOST - sleep 1 - - docker run --name pathcomp-frontend -d -p 10020:10020 --env "PATHCOMP_BACKEND_HOST=172.28.0.1" --env "PATHCOMP_BACKEND_PORT=8081" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG + - > + docker run --name pathcomp-frontend -d -p 10020:10020 + --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}" + --env "PATHCOMP_BACKEND_PORT=8081" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG - sleep 1 - - docker run --name $IMAGE_NAME -d -p 3030:3030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + - PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $PATHCOMPSERVICE_SERVICE_HOST + + # Service preparation + - > + docker run --name $IMAGE_NAME -d -p 3030:3030 + --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" + --env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}" + --env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}" + --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" + --network=teraflowbridge + $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG + + # Check status before the tests - sleep 5 - docker ps -a + - docker logs context + - docker logs device - docker logs pathcomp-frontend - docker logs pathcomp-backend - docker logs $IMAGE_NAME - - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml" + + # Run the tests + - > + docker exec -i $IMAGE_NAME bash -c + "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml" - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" - - docker logs pathcomp-frontend - - docker logs pathcomp-backend - - docker logs $IMAGE_NAME + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: + # Check status after the tests - docker ps -a + - docker logs context + - docker logs device - docker logs pathcomp-frontend - docker logs pathcomp-backend - - docker logs ${IMAGE_NAME} + - docker logs $IMAGE_NAME + + - docker rm -f $IMAGE_NAME - docker rm -f pathcomp-frontend - docker rm -f pathcomp-backend - - docker rm -f $IMAGE_NAME + - docker rm -f device + - docker rm -f context + + - docker rm -f $IMAGE_NAME crdb nats + - docker volume rm -f crdb - docker network rm teraflowbridge + - docker volume prune --force + - docker image prune --force + rules: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' @@ -95,15 +193,7 @@ unit_test service: - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - src/$IMAGE_NAME/tests/Dockerfile - - src/pathcomp/.gitlab-ci.yml - - src/pathcomp/frontend/**/*.{py,in,yml} - - src/pathcomp/frontend/Dockerfile - - src/pathcomp/frontend/tests/*.py - - src/pathcomp/backend/**/*.{c,h,conf} - - src/pathcomp/backend/Makefile - - src/pathcomp/backend/Dockerfile - manifests/${IMAGE_NAME}service.yaml - - manifests/pathcompservice.yaml - .gitlab-ci.yml artifacts: when: always -- GitLab From 32447f07bb199767bb537cfe5536a6ecfe37937e Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 14:30:55 +0000 Subject: [PATCH 127/158] CI/CD pipeline - corrected dependencies in service component --- src/service/.gitlab-ci.yml | 60 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 3 deletions(-) diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index e2feae0cc..43b9cc80e 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -30,13 +30,40 @@ build service: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - changes: + # Common-triggers - src/common/**/*.py - proto/*.proto + - .gitlab-ci.yml + + # Context-triggers + - src/context/**/*.{py,in,yml} + - src/context/Dockerfile + - src/context/tests/*.py + - src/context/tests/Dockerfile + - manifests/contextservice.yaml + + # Device-triggers + - src/device/**/*.{py,in,yml} + - src/device/Dockerfile + - src/device/tests/*.py + - src/device/tests/Dockerfile + - manifests/deviceservice.yaml + + # PathComp-triggers + - src/pathcomp/.gitlab-ci.yml + - src/pathcomp/frontend/**/*.{py,in,yml} + - src/pathcomp/frontend/Dockerfile + - src/pathcomp/frontend/tests/*.py + - src/pathcomp/backend/**/*.{c,h,conf} + - src/pathcomp/backend/Makefile + - src/pathcomp/backend/Dockerfile + - manifests/pathcompservice.yaml + + # Service-triggers - src/$IMAGE_NAME/**/*.{py,in,yml} - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - manifests/${IMAGE_NAME}service.yaml - - .gitlab-ci.yml # Apply unit test to the component unit_test service: @@ -187,14 +214,41 @@ unit_test service: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - changes: + # Common-triggers - src/common/**/*.py - proto/*.proto + - .gitlab-ci.yml + + # Context-triggers + - src/context/**/*.{py,in,yml} + - src/context/Dockerfile + - src/context/tests/*.py + - src/context/tests/Dockerfile + - manifests/contextservice.yaml + + # Device-triggers + - src/device/**/*.{py,in,yml} + - src/device/Dockerfile + - src/device/tests/*.py + - src/device/tests/Dockerfile + - manifests/deviceservice.yaml + + # PathComp-triggers + - src/pathcomp/.gitlab-ci.yml + - src/pathcomp/frontend/**/*.{py,in,yml} + - src/pathcomp/frontend/Dockerfile + - src/pathcomp/frontend/tests/*.py + - src/pathcomp/backend/**/*.{c,h,conf} + - src/pathcomp/backend/Makefile + - src/pathcomp/backend/Dockerfile + - manifests/pathcompservice.yaml + + # Service-triggers - src/$IMAGE_NAME/**/*.{py,in,yml} - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - - src/$IMAGE_NAME/tests/Dockerfile - manifests/${IMAGE_NAME}service.yaml - - .gitlab-ci.yml + artifacts: when: always reports: -- GitLab From 580088c113d46d17f7bd6b960636f8a7a74e8214 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 14:44:32 +0000 Subject: [PATCH 128/158] CI/CD pipeline - corrected dependencies in service component --- src/service/.gitlab-ci.yml | 62 ++------------------------------------ 1 file changed, 2 insertions(+), 60 deletions(-) diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index 43b9cc80e..cda011062 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -30,40 +30,13 @@ build service: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - changes: - # Common-triggers - src/common/**/*.py - proto/*.proto - - .gitlab-ci.yml - - # Context-triggers - - src/context/**/*.{py,in,yml} - - src/context/Dockerfile - - src/context/tests/*.py - - src/context/tests/Dockerfile - - manifests/contextservice.yaml - - # Device-triggers - - src/device/**/*.{py,in,yml} - - src/device/Dockerfile - - src/device/tests/*.py - - src/device/tests/Dockerfile - - manifests/deviceservice.yaml - - # PathComp-triggers - - src/pathcomp/.gitlab-ci.yml - - src/pathcomp/frontend/**/*.{py,in,yml} - - src/pathcomp/frontend/Dockerfile - - src/pathcomp/frontend/tests/*.py - - src/pathcomp/backend/**/*.{c,h,conf} - - src/pathcomp/backend/Makefile - - src/pathcomp/backend/Dockerfile - - manifests/pathcompservice.yaml - - # Service-triggers - src/$IMAGE_NAME/**/*.{py,in,yml} - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml # Apply unit test to the component unit_test service: @@ -73,10 +46,6 @@ unit_test service: stage: unit_test_stage3 needs: - build service - - unit_test context - - unit_test device - - unit_test pathcomp-backend - - unit_test pathcomp-frontend before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi @@ -214,40 +183,13 @@ unit_test service: - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - changes: - # Common-triggers - src/common/**/*.py - proto/*.proto - - .gitlab-ci.yml - - # Context-triggers - - src/context/**/*.{py,in,yml} - - src/context/Dockerfile - - src/context/tests/*.py - - src/context/tests/Dockerfile - - manifests/contextservice.yaml - - # Device-triggers - - src/device/**/*.{py,in,yml} - - src/device/Dockerfile - - src/device/tests/*.py - - src/device/tests/Dockerfile - - manifests/deviceservice.yaml - - # PathComp-triggers - - src/pathcomp/.gitlab-ci.yml - - src/pathcomp/frontend/**/*.{py,in,yml} - - src/pathcomp/frontend/Dockerfile - - src/pathcomp/frontend/tests/*.py - - src/pathcomp/backend/**/*.{c,h,conf} - - src/pathcomp/backend/Makefile - - src/pathcomp/backend/Dockerfile - - manifests/pathcompservice.yaml - - # Service-triggers - src/$IMAGE_NAME/**/*.{py,in,yml} - src/$IMAGE_NAME/Dockerfile - src/$IMAGE_NAME/tests/*.py - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml artifacts: when: always -- GitLab From f173762ef7b9e51aeb717eb7c90ad374ec2e37f7 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 14:54:46 +0000 Subject: [PATCH 129/158] CI/CD pipeline - reduced dependencies since it breaks pipeline --- .gitlab-ci.yml | 5 +---- src/automation/.gitlab-ci.yml | 2 +- src/compute/.gitlab-ci.yml | 2 +- src/context/.gitlab-ci.yml | 2 +- src/device/.gitlab-ci.yml | 2 +- src/monitoring/.gitlab-ci.yml | 2 +- src/pathcomp/.gitlab-ci.yml | 5 ++--- src/policy/.gitlab-ci.yml | 2 +- src/service/.gitlab-ci.yml | 2 +- src/slice/.gitlab-ci.yml | 2 +- 10 files changed, 11 insertions(+), 15 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index dbc43f278..242f0b60d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,10 +18,7 @@ stages: - build - build - test - - unit_test_stage1 - - unit_test_stage2 - - unit_test_stage3 - - unit_test_stage4 + - unit_test #- deploy #- end2end_test diff --git a/src/automation/.gitlab-ci.yml b/src/automation/.gitlab-ci.yml index 610cf8eec..9c66a1798 100644 --- a/src/automation/.gitlab-ci.yml +++ b/src/automation/.gitlab-ci.yml @@ -42,7 +42,7 @@ build automation: unit_test automation: variables: REPORTS_CONTAINER: "${IMAGE_NAME}-reports" - stage: unit_test_stage1 + stage: unit_test needs: - build automation before_script: diff --git a/src/compute/.gitlab-ci.yml b/src/compute/.gitlab-ci.yml index fdf8af236..d8614cd1c 100644 --- a/src/compute/.gitlab-ci.yml +++ b/src/compute/.gitlab-ci.yml @@ -43,7 +43,7 @@ unit_test compute: variables: IMAGE_NAME: 'compute' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage1 + stage: unit_test needs: - build compute before_script: diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml index 7d2a95e7c..044600bc5 100644 --- a/src/context/.gitlab-ci.yml +++ b/src/context/.gitlab-ci.yml @@ -43,7 +43,7 @@ unit_test context: variables: IMAGE_NAME: 'context' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage1 + stage: unit_test needs: - build context before_script: diff --git a/src/device/.gitlab-ci.yml b/src/device/.gitlab-ci.yml index cb447e2e5..b0b32ab15 100644 --- a/src/device/.gitlab-ci.yml +++ b/src/device/.gitlab-ci.yml @@ -43,7 +43,7 @@ unit_test device: variables: IMAGE_NAME: 'device' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage1 + stage: unit_test needs: - build device before_script: diff --git a/src/monitoring/.gitlab-ci.yml b/src/monitoring/.gitlab-ci.yml index 1dfefa1ee..4a981cba2 100644 --- a/src/monitoring/.gitlab-ci.yml +++ b/src/monitoring/.gitlab-ci.yml @@ -43,7 +43,7 @@ unit_test monitoring: variables: IMAGE_NAME: 'monitoring' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage1 + stage: unit_test needs: - build monitoring before_script: diff --git a/src/pathcomp/.gitlab-ci.yml b/src/pathcomp/.gitlab-ci.yml index 5749a94fe..7658fcae9 100644 --- a/src/pathcomp/.gitlab-ci.yml +++ b/src/pathcomp/.gitlab-ci.yml @@ -52,7 +52,7 @@ unit_test pathcomp-backend: variables: IMAGE_NAME: 'pathcomp' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage1 + stage: unit_test needs: - build pathcomp before_script: @@ -97,10 +97,9 @@ unit_test pathcomp-frontend: variables: IMAGE_NAME: 'pathcomp' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage2 + stage: unit_test needs: - build pathcomp - - unit_test pathcomp-backend before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi diff --git a/src/policy/.gitlab-ci.yml b/src/policy/.gitlab-ci.yml index d19da81d6..f257c554c 100644 --- a/src/policy/.gitlab-ci.yml +++ b/src/policy/.gitlab-ci.yml @@ -42,7 +42,7 @@ build policy: unit_test policy: variables: REPORTS_CONTAINER: "${IMAGE_NAME_POLICY}-reports" - stage: unit_test_stage1 + stage: unit_test needs: - build policy before_script: diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index cda011062..ae4b2c3af 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -43,7 +43,7 @@ unit_test service: variables: IMAGE_NAME: 'service' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage3 + stage: unit_test needs: - build service before_script: diff --git a/src/slice/.gitlab-ci.yml b/src/slice/.gitlab-ci.yml index 5fbff7029..c1b2eb487 100644 --- a/src/slice/.gitlab-ci.yml +++ b/src/slice/.gitlab-ci.yml @@ -43,7 +43,7 @@ unit_test slice: variables: IMAGE_NAME: 'slice' # name of the microservice IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test_stage4 + stage: unit_test needs: - build slice - unit_test service -- GitLab From 131f870bdf05367312ef3d4cbcbf89941c348f18 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 16:02:10 +0000 Subject: [PATCH 130/158] Monitoring component: - updated and migrated unitary test --- src/monitoring/tests/test_unitary.py | 222 ++++++++++----------------- 1 file changed, 83 insertions(+), 139 deletions(-) diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index b113f5a78..6e8741dae 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -12,58 +12,48 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, os, pytest -import threading -import time -from queue import Queue +import copy, os, pytest #, threading, time +import logging +#from queue import Queue from random import random from time import sleep -from typing import Tuple - +from typing import Union #, Tuple from apscheduler.executors.pool import ProcessPoolExecutor from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.schedulers.base import STATE_STOPPED from grpc._channel import _MultiThreadedRendezvous - from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc) -from common.logger import getJSONLogger -from common.orm.Database import Database -from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum -from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum -from common.message_broker.MessageBroker import MessageBroker +#from common.logger import getJSONLogger from common.proto import monitoring_pb2 +from common.proto.context_pb2 import EventTypeEnum, DeviceEvent, Device, Empty +from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server from common.proto.kpi_sample_types_pb2 import KpiSampleType -from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, KpiList, SubsDescriptor, SubsList, AlarmID, \ - AlarmDescriptor, AlarmList, Kpi, KpiDescriptorList, SubsResponse, AlarmResponse, RawKpiTable -from common.tools.timestamp.Converters import timestamp_utcnow_to_float, timestamp_string_to_float - +from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, SubsDescriptor, SubsList, AlarmID, \ + AlarmDescriptor, AlarmList, KpiDescriptorList, SubsResponse, AlarmResponse, RawKpiTable #, Kpi, KpiList +from common.tests.MockServicerImpl_Context import MockServicerImpl_Context +from common.tools.service.GenericGrpcService import GenericGrpcService +from common.tools.timestamp.Converters import timestamp_utcnow_to_float #, timestamp_string_to_float from context.client.ContextClient import ContextClient -from context.service.grpc_server.ContextService import ContextService -from common.proto.context_pb2 import EventTypeEnum, DeviceEvent, Device, Empty - +from context.service.ContextService import ContextService from device.client.DeviceClient import DeviceClient from device.service.DeviceService import DeviceService from device.service.driver_api.DriverFactory import DriverFactory from device.service.driver_api.DriverInstanceCache import DriverInstanceCache -from monitoring.service.AlarmManager import AlarmManager -from monitoring.service.MetricsDBTools import MetricsDB -from monitoring.service.SubscriptionManager import SubscriptionManager - -os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' -from device.service.drivers import DRIVERS # pylint: disable=wrong-import-position - from monitoring.client.MonitoringClient import MonitoringClient from monitoring.service import ManagementDBTools, MetricsDBTools -from monitoring.service.MonitoringService import MonitoringService +#from monitoring.service.AlarmManager import AlarmManager from monitoring.service.EventTools import EventsDeviceCollector +from monitoring.service.MetricsDBTools import MetricsDB +from monitoring.service.MonitoringService import MonitoringService +#from monitoring.service.SubscriptionManager import SubscriptionManager from monitoring.tests.Messages import create_kpi_request, include_kpi_request, monitor_kpi_request, \ - create_kpi_request_b, create_kpi_request_c, kpi_query, subs_descriptor, alarm_descriptor, \ - alarm_subscription + create_kpi_request_c, kpi_query, subs_descriptor, alarm_descriptor, alarm_subscription #, create_kpi_request_b from monitoring.tests.Objects import DEVICE_DEV1, DEVICE_DEV1_CONNECT_RULES, DEVICE_DEV1_UUID -from monitoring.service.MonitoringServiceServicerImpl import LOGGER +os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' +from device.service.drivers import DRIVERS # pylint: disable=wrong-import-position,ungrouped-imports ########################### @@ -71,49 +61,54 @@ from monitoring.service.MonitoringServiceServicerImpl import LOGGER ########################### LOCAL_HOST = '127.0.0.1' +MOCKSERVICE_PORT = 10000 -CONTEXT_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.CONTEXT) # avoid privileged ports -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) -os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(CONTEXT_SERVICE_PORT) - -DEVICE_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports +DEVICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(DEVICE_SERVICE_PORT) -MONITORING_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.MONITORING) # avoid privileged ports +MONITORING_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.MONITORING) # avoid privileged ports os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(MONITORING_SERVICE_PORT) -METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME") -METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT") -METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT") -METRICSDB_TABLE = os.environ.get("METRICSDB_TABLE") +METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME') +METRICSDB_ILP_PORT = os.environ.get('METRICSDB_ILP_PORT') +METRICSDB_REST_PORT = os.environ.get('METRICSDB_REST_PORT') +METRICSDB_TABLE = os.environ.get('METRICSDB_TABLE') +LOGGER = logging.getLogger(__name__) -@pytest.fixture(scope='session') -def context_db_mb() -> Tuple[Database, MessageBroker]: - _database = Database(get_database_backend(backend=DatabaseBackendEnum.INMEMORY)) - _message_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) - yield _database, _message_broker - _message_broker.terminate() +class MockService_Dependencies(GenericGrpcService): + # Mock Service implementing Context and Device to simplify unitary tests of Monitoring + + def __init__(self, bind_port: Union[str, int]) -> None: + super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') + + # pylint: disable=attribute-defined-outside-init + def install_servicers(self): + self.context_servicer = MockServicerImpl_Context() + add_ContextServiceServicer_to_server(self.context_servicer, self.server) + + def configure_env_vars(self): + os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address) + os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port) @pytest.fixture(scope='session') -def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name - database, message_broker = context_db_mb - database.clear_all() - _service = ContextService(database, message_broker) +def context_service(): + _service = MockService_Dependencies(MOCKSERVICE_PORT) + _service.configure_env_vars() _service.start() yield _service _service.stop() @pytest.fixture(scope='session') -def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name +def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name,unused-argument _client = ContextClient() yield _client _client.close() @pytest.fixture(scope='session') -def device_service(context_service : ContextService): # pylint: disable=redefined-outer-name +def device_service(context_service : ContextService): # pylint: disable=redefined-outer-name,unused-argument LOGGER.info('Initializing DeviceService...') driver_factory = DriverFactory(DRIVERS) driver_instance_cache = DriverInstanceCache(driver_factory) @@ -128,7 +123,7 @@ def device_service(context_service : ContextService): # pylint: disable=redefine _service.stop() @pytest.fixture(scope='session') -def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name +def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument _client = DeviceClient() yield _client _client.close() @@ -136,8 +131,8 @@ def device_client(device_service : DeviceService): # pylint: disable=redefined-o # This fixture will be requested by test cases and last during testing session @pytest.fixture(scope='session') def monitoring_service( - context_service : ContextService, # pylint: disable=redefined-outer-name - device_service : DeviceService # pylint: disable=redefined-outer-name + context_service : ContextService, # pylint: disable=redefined-outer-name,unused-argument + device_service : DeviceService # pylint: disable=redefined-outer-name,unused-argument ): LOGGER.info('Initializing MonitoringService...') _service = MonitoringService() @@ -153,7 +148,7 @@ def monitoring_service( # This fixture will be requested by test cases and last during testing session. # The client requires the server, so client fixture has the server as dependency. @pytest.fixture(scope='session') -def monitoring_client(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name +def monitoring_client(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name,unused-argument LOGGER.info('Initializing MonitoringClient...') _client = MonitoringClient() @@ -183,10 +178,13 @@ def subs_scheduler(): return _scheduler def ingestion_data(kpi_id_int): - metrics_db = MetricsDB("localhost", "9009", "9000", "monitoring") + # pylint: disable=redefined-outer-name,unused-argument + metrics_db = MetricsDB('localhost', '9009', '9000', 'monitoring') - for i in range(50): - kpiSampleType = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED).upper().replace('KPISAMPLETYPE_', '') + kpiSampleType = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED + kpiSampleType_name = KpiSampleType.Name(kpiSampleType).upper().replace('KPISAMPLETYPE_', '') + for _ in range(50): + kpiSampleType = kpiSampleType_name kpiId = kpi_id_int deviceId = 'DEV'+ str(kpi_id_int) endpointId = 'END' + str(kpi_id_int) @@ -250,26 +248,12 @@ def test_include_kpi(monitoring_client): # pylint: disable=redefined-outer-name # Test case that makes use of client fixture to test server's MonitorKpi method def test_monitor_kpi( - context_client : ContextClient, # pylint: disable=redefined-outer-name + context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument device_client : DeviceClient, # pylint: disable=redefined-outer-name monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker] # pylint: disable=redefined-outer-name ): LOGGER.info('test_monitor_kpi begin') - context_database = context_db_mb[0] - - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() - - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - # ----- Update the object ------------------------------------------------------------------------------------------ LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID)) device_with_connect_rules = copy.deepcopy(DEVICE_DEV1) @@ -313,7 +297,7 @@ def test_set_kpi_subscription(monitoring_client,subs_scheduler): # pylint: disab subs_scheduler.shutdown() # Test case that makes use of client fixture to test server's GetSubsDescriptor method -def test_get_subs_descriptor(monitoring_client): +def test_get_subs_descriptor(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_get_subs_descriptor') kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) monitoring_client.IncludeKpi(include_kpi_request(kpi_id)) @@ -324,14 +308,14 @@ def test_get_subs_descriptor(monitoring_client): assert isinstance(response, SubsDescriptor) # Test case that makes use of client fixture to test server's GetSubscriptions method -def test_get_subscriptions(monitoring_client): +def test_get_subscriptions(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_get_subscriptions') response = monitoring_client.GetSubscriptions(Empty()) LOGGER.debug(response) assert isinstance(response, SubsList) # Test case that makes use of client fixture to test server's DeleteSubscription method -def test_delete_subscription(monitoring_client): +def test_delete_subscription(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_delete_subscription') kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) monitoring_client.IncludeKpi(include_kpi_request(kpi_id)) @@ -341,7 +325,7 @@ def test_delete_subscription(monitoring_client): assert isinstance(response, Empty) # Test case that makes use of client fixture to test server's SetKpiAlarm method -def test_set_kpi_alarm(monitoring_client): +def test_set_kpi_alarm(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_set_kpi_alarm') kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) response = monitoring_client.SetKpiAlarm(alarm_descriptor(kpi_id)) @@ -349,14 +333,14 @@ def test_set_kpi_alarm(monitoring_client): assert isinstance(response, AlarmID) # Test case that makes use of client fixture to test server's GetAlarms method -def test_get_alarms(monitoring_client): +def test_get_alarms(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_get_alarms') response = monitoring_client.GetAlarms(Empty()) LOGGER.debug(response) assert isinstance(response, AlarmList) # Test case that makes use of client fixture to test server's GetAlarmDescriptor method -def test_get_alarm_descriptor(monitoring_client): +def test_get_alarm_descriptor(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_get_alarm_descriptor') _kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id)) @@ -365,7 +349,7 @@ def test_get_alarm_descriptor(monitoring_client): assert isinstance(_response, AlarmDescriptor) # Test case that makes use of client fixture to test server's GetAlarmResponseStream method -def test_get_alarm_response_stream(monitoring_client,subs_scheduler): +def test_get_alarm_response_stream(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name LOGGER.warning('test_get_alarm_descriptor') _kpi_id = monitoring_client.SetKpi(create_kpi_request('3')) _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id)) @@ -380,7 +364,7 @@ def test_get_alarm_response_stream(monitoring_client,subs_scheduler): subs_scheduler.shutdown() # Test case that makes use of client fixture to test server's DeleteAlarm method -def test_delete_alarm(monitoring_client): +def test_delete_alarm(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_delete_alarm') _kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id)) @@ -408,15 +392,17 @@ def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-na def test_managementdb_tools_kpis(management_db): # pylint: disable=redefined-outer-name LOGGER.warning('test_managementdb_tools_kpis begin') _create_kpi_request = create_kpi_request('5') - kpi_description = _create_kpi_request.kpi_description # pylint: disable=maybe-no-member - kpi_sample_type = _create_kpi_request.kpi_sample_type # pylint: disable=maybe-no-member - kpi_device_id = _create_kpi_request.device_id.device_uuid.uuid # pylint: disable=maybe-no-member - kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member - kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid # pylint: disable=maybe-no-member - kpi_slice_id = _create_kpi_request.slice_id.slice_uuid.uuid - kpi_connection_id = _create_kpi_request.connection_id.connection_uuid.uuid - - _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id,kpi_slice_id,kpi_connection_id) + kpi_description = _create_kpi_request.kpi_description # pylint: disable=maybe-no-member + kpi_sample_type = _create_kpi_request.kpi_sample_type # pylint: disable=maybe-no-member + kpi_device_id = _create_kpi_request.device_id.device_uuid.uuid # pylint: disable=maybe-no-member + kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member + kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid # pylint: disable=maybe-no-member + kpi_slice_id = _create_kpi_request.slice_id.slice_uuid.uuid # pylint: disable=maybe-no-member + kpi_connection_id = _create_kpi_request.connection_id.connection_uuid.uuid # pylint: disable=maybe-no-member + + _kpi_id = management_db.insert_KPI( + kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, + kpi_slice_id, kpi_connection_id) assert isinstance(_kpi_id, int) response = management_db.get_KPI(_kpi_id) @@ -517,30 +503,16 @@ def test_managementdb_tools_insert_alarm(management_db): # assert total_points != 0 def test_events_tools( - context_client : ContextClient, # pylint: disable=redefined-outer-name + context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument device_client : DeviceClient, # pylint: disable=redefined-outer-name - monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker] # pylint: disable=redefined-outer-name + monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name,unused-argument ): LOGGER.warning('test_get_device_events begin') - context_database = context_db_mb[0] - - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsDeviceCollector() events_collector.start() - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - # ----- Update the object ------------------------------------------------------------------------------------------ LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID)) device_with_connect_rules = copy.deepcopy(DEVICE_DEV1) @@ -552,31 +524,17 @@ def test_events_tools( def test_get_device_events( - context_client : ContextClient, # pylint: disable=redefined-outer-name + context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument device_client : DeviceClient, # pylint: disable=redefined-outer-name - monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker] # pylint: disable=redefined-outer-name + monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name,unused-argument ): LOGGER.warning('test_get_device_events begin') - context_database = context_db_mb[0] - - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsDeviceCollector() events_collector.start() - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - # ----- Check create event ----------------------------------------------------------------------------------------- LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID)) device_with_connect_rules = copy.deepcopy(DEVICE_DEV1) @@ -592,31 +550,17 @@ def test_get_device_events( events_collector.stop() def test_listen_events( - context_client : ContextClient, # pylint: disable=redefined-outer-name + context_client : ContextClient, # pylint: disable=redefined-outer-name,unused-argument device_client : DeviceClient, # pylint: disable=redefined-outer-name - monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name - context_db_mb : Tuple[Database, MessageBroker] # pylint: disable=redefined-outer-name + monitoring_client : MonitoringClient, # pylint: disable=redefined-outer-name,unused-argument ): LOGGER.warning('test_listen_events begin') - context_database = context_db_mb[0] - - # ----- Clean the database ----------------------------------------------------------------------------------------- - context_database.clear_all() - # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- events_collector = EventsDeviceCollector() events_collector.start() - # ----- Dump state of database before create the object ------------------------------------------------------------ - db_entries = context_database.dump() - LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) - for db_entry in db_entries: - LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover - LOGGER.info('-----------------------------------------------------------') - assert len(db_entries) == 0 - LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID)) device_with_connect_rules = copy.deepcopy(DEVICE_DEV1) device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES) -- GitLab From 8f3493c49043291ed283dbda811f1326b78b84f0 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 16:05:25 +0000 Subject: [PATCH 131/158] Device, Monitoring components: - updated requirements --- src/device/requirements.in | 10 ++-------- src/monitoring/requirements.in | 12 +++--------- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/src/device/requirements.in b/src/device/requirements.in index 2b9c199c8..70daceaea 100644 --- a/src/device/requirements.in +++ b/src/device/requirements.in @@ -1,22 +1,16 @@ anytree==2.8.0 APScheduler==3.8.1 -fastcache==1.1.0 +#fastcache==1.1.0 Jinja2==3.0.3 ncclient==0.6.13 p4runtime==1.3.0 paramiko==2.9.2 python-json-logger==2.0.2 pytz==2021.3 -redis==4.1.2 +#redis==4.1.2 requests==2.27.1 requests-mock==1.9.3 xmltodict==0.12.0 tabulate ipaddress macaddress - -# pip's dependency resolver does not take into account installed packages. -# p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one -# adding here again grpcio==1.47.* and protobuf==3.20.* with explicit versions to prevent collisions -grpcio==1.47.* -protobuf==3.20.* diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in index c07f0c8f4..bc5935012 100644 --- a/src/monitoring/requirements.in +++ b/src/monitoring/requirements.in @@ -1,6 +1,6 @@ -anytree==2.8.0 +#anytree==2.8.0 APScheduler==3.8.1 -fastcache==1.1.0 +#fastcache==1.1.0 #google-api-core #opencensus[stackdriver] #google-cloud-profiler @@ -13,14 +13,8 @@ influx-line-protocol==0.1.4 python-dateutil==2.8.2 python-json-logger==2.0.2 pytz==2021.3 -redis==4.1.2 +#redis==4.1.2 requests==2.27.1 xmltodict==0.12.0 questdb==1.0.1 psycopg2-binary==2.9.3 - -# pip's dependency resolver does not take into account installed packages. -# p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one -# adding here again grpcio==1.47.* and protobuf==3.20.* with explicit versions to prevent collisions -grpcio==1.47.* -protobuf==3.20.* -- GitLab From d54b356cd5df0d5561471c73d7c6cfc7f6fdd933 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 16:07:37 +0000 Subject: [PATCH 132/158] CI/CD pipeline - corrected dependencies in service component --- src/service/.gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml index ae4b2c3af..c6d4b185f 100644 --- a/src/service/.gitlab-ci.yml +++ b/src/service/.gitlab-ci.yml @@ -124,6 +124,7 @@ unit_test service: - sleep 1 - > docker run --name pathcomp-frontend -d -p 10020:10020 + --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}" --env "PATHCOMP_BACKEND_PORT=8081" --network=teraflowbridge -- GitLab From e497882f810ceec31f376bfbf46d72925acea183 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 16:59:59 +0000 Subject: [PATCH 133/158] Device component: - updated requirements --- src/device/requirements.in | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/device/requirements.in b/src/device/requirements.in index 70daceaea..96faa95c5 100644 --- a/src/device/requirements.in +++ b/src/device/requirements.in @@ -14,3 +14,9 @@ xmltodict==0.12.0 tabulate ipaddress macaddress + +# pip's dependency resolver does not take into account installed packages. +# p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one +# adding here again grpcio==1.47.* and protobuf==3.20.* with explicit versions to prevent collisions +grpcio==1.47.* +protobuf==3.20.* -- GitLab From f58ebd4136f6f5bcf2c6cc02459fa51afa0043fb Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 17:02:50 +0000 Subject: [PATCH 134/158] Monitoring component: - corrected imports and dependencies of unitary test --- src/monitoring/tests/test_unitary.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index 6e8741dae..60d15ef9d 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -36,7 +36,6 @@ from common.tests.MockServicerImpl_Context import MockServicerImpl_Context from common.tools.service.GenericGrpcService import GenericGrpcService from common.tools.timestamp.Converters import timestamp_utcnow_to_float #, timestamp_string_to_float from context.client.ContextClient import ContextClient -from context.service.ContextService import ContextService from device.client.DeviceClient import DeviceClient from device.service.DeviceService import DeviceService from device.service.driver_api.DriverFactory import DriverFactory @@ -78,8 +77,8 @@ METRICSDB_TABLE = os.environ.get('METRICSDB_TABLE') LOGGER = logging.getLogger(__name__) -class MockService_Dependencies(GenericGrpcService): - # Mock Service implementing Context and Device to simplify unitary tests of Monitoring +class MockContextService(GenericGrpcService): + # Mock Service implementing Context to simplify unitary tests of Monitoring def __init__(self, bind_port: Union[str, int]) -> None: super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') @@ -95,20 +94,20 @@ class MockService_Dependencies(GenericGrpcService): @pytest.fixture(scope='session') def context_service(): - _service = MockService_Dependencies(MOCKSERVICE_PORT) + _service = MockContextService(MOCKSERVICE_PORT) _service.configure_env_vars() _service.start() yield _service _service.stop() @pytest.fixture(scope='session') -def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name,unused-argument +def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument _client = ContextClient() yield _client _client.close() @pytest.fixture(scope='session') -def device_service(context_service : ContextService): # pylint: disable=redefined-outer-name,unused-argument +def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument LOGGER.info('Initializing DeviceService...') driver_factory = DriverFactory(DRIVERS) driver_instance_cache = DriverInstanceCache(driver_factory) @@ -131,7 +130,7 @@ def device_client(device_service : DeviceService): # pylint: disable=redefined-o # This fixture will be requested by test cases and last during testing session @pytest.fixture(scope='session') def monitoring_service( - context_service : ContextService, # pylint: disable=redefined-outer-name,unused-argument + context_service : MockContextService, # pylint: disable=redefined-outer-name,unused-argument device_service : DeviceService # pylint: disable=redefined-outer-name,unused-argument ): LOGGER.info('Initializing MonitoringService...') -- GitLab From f303c76e9e610acbfca8c508d3bda01ec1afa3dd Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 17:48:44 +0000 Subject: [PATCH 135/158] CockroachDB deployment: - updated instructions - file cleanup --- manifests/cockroachdb/README.md | 15 +- .../cockroachdb/from_carlos/cluster-init.yaml | 20 -- .../from_carlos/cockroachdb-statefulset.yaml | 182 ------------------ manifests/cockroachdb/operator.yaml | 2 +- 4 files changed, 13 insertions(+), 206 deletions(-) delete mode 100644 manifests/cockroachdb/from_carlos/cluster-init.yaml delete mode 100644 manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml diff --git a/manifests/cockroachdb/README.md b/manifests/cockroachdb/README.md index 2d9a94910..bfd774f0f 100644 --- a/manifests/cockroachdb/README.md +++ b/manifests/cockroachdb/README.md @@ -1,4 +1,12 @@ -# Ref: https://www.cockroachlabs.com/docs/stable/configure-cockroachdb-kubernetes.html +# CockroachDB configuration preparation + +These steps reproduce how to generate Cockroach manifest files used in TeraFlowSDN and apply them to MicroK8s. +For stability reasons, we fix the versions providing the manifest files. +In future releases of TeraFlowSDN, we might consider dynamically downloading and modifying the files. + +- Ref: https://www.cockroachlabs.com/docs/stable/configure-cockroachdb-kubernetes.html + +## Steps: DEPLOY_PATH="manifests/cockroachdb" OPERATOR_BASE_URL="https://raw.githubusercontent.com/cockroachdb/cockroach-operator/master" @@ -12,8 +20,9 @@ kubectl apply -f "${DEPLOY_PATH}/crds.yaml" # Deploy CockroachDB Operator curl -o "${DEPLOY_PATH}/operator.yaml" "${OPERATOR_BASE_URL}/install/operator.yaml" nano "${DEPLOY_PATH}/operator.yaml" -# - add env var: WATCH_NAMESPACE='crdb' -kubectl apply -f "${DEPLOY_PATH}/operator.yaml" +# - add env var: WATCH_NAMESPACE=%TFS_CRDB_NAMESPACE% +sed s/%TFS_CRDB_NAMESPACE%/crdb/g ${DEPLOY_PATH}/operator.yaml > ${DEPLOY_PATH}/tfs_crdb_operator.yaml +kubectl apply -f "${DEPLOY_PATH}/tfs_crdb_operator.yaml" # Deploy CockroachDB curl -o "${DEPLOY_PATH}/cluster.yaml" "${OPERATOR_BASE_URL}/examples/example.yaml" diff --git a/manifests/cockroachdb/from_carlos/cluster-init.yaml b/manifests/cockroachdb/from_carlos/cluster-init.yaml deleted file mode 100644 index 6590ba127..000000000 --- a/manifests/cockroachdb/from_carlos/cluster-init.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/cluster-init.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: cluster-init - labels: - app: cockroachdb -spec: - template: - spec: - containers: - - name: cluster-init - image: cockroachdb/cockroach:v22.1.6 - imagePullPolicy: IfNotPresent - command: - - "/cockroach/cockroach" - - "init" - - "--insecure" - - "--host=cockroachdb-0.cockroachdb" - restartPolicy: OnFailure diff --git a/manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml b/manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml deleted file mode 100644 index f308e8fce..000000000 --- a/manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml +++ /dev/null @@ -1,182 +0,0 @@ -# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/cockroachdb-statefulset.yaml -apiVersion: v1 -kind: Service -metadata: - # This service is meant to be used by clients of the database. It exposes a ClusterIP that will - # automatically load balance connections to the different database pods. - name: cockroachdb-public - labels: - app: cockroachdb -spec: - ports: - # The main port, served by gRPC, serves Postgres-flavor SQL, internode - # traffic and the cli. - - port: 26257 - targetPort: 26257 - name: grpc - # The secondary port serves the UI as well as health and debug endpoints. - - port: 8080 - targetPort: 8080 - name: http - selector: - app: cockroachdb ---- -apiVersion: v1 -kind: Service -metadata: - # This service only exists to create DNS entries for each pod in the stateful - # set such that they can resolve each other's IP addresses. It does not - # create a load-balanced ClusterIP and should not be used directly by clients - # in most circumstances. - name: cockroachdb - labels: - app: cockroachdb - annotations: - # Use this annotation in addition to the actual publishNotReadyAddresses - # field below because the annotation will stop being respected soon but the - # field is broken in some versions of Kubernetes: - # https://github.com/kubernetes/kubernetes/issues/58662 - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" - # Enable automatic monitoring of all instances when Prometheus is running in the cluster. - prometheus.io/scrape: "true" - prometheus.io/path: "_status/vars" - prometheus.io/port: "8080" -spec: - ports: - - port: 26257 - targetPort: 26257 - name: grpc - - port: 8080 - targetPort: 8080 - name: http - # We want all pods in the StatefulSet to have their addresses published for - # the sake of the other CockroachDB pods even before they're ready, since they - # have to be able to talk to each other in order to become ready. - publishNotReadyAddresses: true - clusterIP: None - selector: - app: cockroachdb ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: cockroachdb-budget - labels: - app: cockroachdb -spec: - selector: - matchLabels: - app: cockroachdb - maxUnavailable: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: cockroachdb -spec: - serviceName: "cockroachdb" - replicas: 3 - selector: - matchLabels: - app: cockroachdb - template: - metadata: - labels: - app: cockroachdb - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - cockroachdb - topologyKey: kubernetes.io/hostname - containers: - - name: cockroachdb - image: cockroachdb/cockroach:v22.1.6 - imagePullPolicy: IfNotPresent - # TODO: Change these to appropriate values for the hardware that you're running. You can see - # the resources that can be allocated on each of your Kubernetes nodes by running: - # kubectl describe nodes - # Note that requests and limits should have identical values. - resources: - requests: - cpu: "250m" - memory: "1Gi" - limits: - cpu: "1" - memory: "1Gi" - ports: - - containerPort: 26257 - name: grpc - - containerPort: 8080 - name: http -# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. -# livenessProbe: -# httpGet: -# path: "/health" -# port: http -# initialDelaySeconds: 30 -# periodSeconds: 5 - readinessProbe: - httpGet: - path: "/health?ready=1" - port: http - initialDelaySeconds: 10 - periodSeconds: 5 - failureThreshold: 2 - volumeMounts: - - name: datadir - mountPath: /cockroach/cockroach-data - env: - - name: COCKROACH_CHANNEL - value: kubernetes-insecure - - name: GOMAXPROCS - valueFrom: - resourceFieldRef: - resource: limits.cpu - divisor: "1" - - name: MEMORY_LIMIT_MIB - valueFrom: - resourceFieldRef: - resource: limits.memory - divisor: "1Mi" - command: - - "/bin/bash" - - "-ecx" - # The use of qualified `hostname -f` is crucial: - # Other nodes aren't able to look up the unqualified hostname. - - exec - /cockroach/cockroach - start - --logtostderr - --insecure - --advertise-host $(hostname -f) - --http-addr 0.0.0.0 - --join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb - --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB - --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB - # No pre-stop hook is required, a SIGTERM plus some time is all that's - # needed for graceful shutdown of a node. - terminationGracePeriodSeconds: 60 - volumes: - - name: datadir - persistentVolumeClaim: - claimName: datadir - podManagementPolicy: Parallel - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - metadata: - name: datadir - spec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 10Gi diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml index 74734c7e9..2be72d329 100644 --- a/manifests/cockroachdb/operator.yaml +++ b/manifests/cockroachdb/operator.yaml @@ -543,7 +543,7 @@ spec: - name: OPERATOR_NAME value: cockroachdb - name: WATCH_NAMESPACE - value: crdb + value: %TFS_CRDB_NAMESPACE% - name: POD_NAME valueFrom: fieldRef: -- GitLab From ff2bb58f87926932c41ef8a16ab80f0587c5923d Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 17:50:05 +0000 Subject: [PATCH 136/158] Monitoring component: - corrected imports and dependencies of unitary test --- src/monitoring/requirements.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in index bc5935012..fd7a555cf 100644 --- a/src/monitoring/requirements.in +++ b/src/monitoring/requirements.in @@ -1,4 +1,4 @@ -#anytree==2.8.0 +anytree==2.8.0 APScheduler==3.8.1 #fastcache==1.1.0 #google-api-core -- GitLab From 91531445c09bc7dac31188966ec3b120f5a468d3 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 18:03:49 +0000 Subject: [PATCH 137/158] Common test tools: - corrected gRPC format of Events in MockMessageBroker --- src/common/tests/MockMessageBroker.py | 23 +++++++++++++++++--- src/common/tests/MockServicerImpl_Context.py | 12 +++------- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/common/tests/MockMessageBroker.py b/src/common/tests/MockMessageBroker.py index 851c06766..563903b98 100644 --- a/src/common/tests/MockMessageBroker.py +++ b/src/common/tests/MockMessageBroker.py @@ -15,9 +15,24 @@ import json, logging, threading, time from queue import Queue, Empty from typing import Dict, Iterator, NamedTuple, Set +from common.proto.context_pb2 import EventTypeEnum LOGGER = logging.getLogger(__name__) -CONSUME_TIMEOUT = 0.1 # seconds + +TOPIC_CONNECTION = 'connection' +TOPIC_CONTEXT = 'context' +TOPIC_DEVICE = 'device' +TOPIC_LINK = 'link' +TOPIC_POLICY = 'policy' +TOPIC_SERVICE = 'service' +TOPIC_SLICE = 'slice' +TOPIC_TOPOLOGY = 'topology' + +TOPICS = { + TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY +} + +CONSUME_TIMEOUT = 0.5 # seconds class Message(NamedTuple): topic: str @@ -54,8 +69,10 @@ class MockMessageBroker: def terminate(self): self._terminate.set() -def notify_event(messagebroker, topic_name, event_type, fields) -> None: - event = {'event': {'timestamp': time.time(), 'event_type': event_type}} +def notify_event( + messagebroker : MockMessageBroker, topic_name : str, event_type : EventTypeEnum, fields : Dict[str, str] +) -> None: + event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}} for field_name, field_value in fields.items(): event[field_name] = field_value messagebroker.publish(Message(topic_name, json.dumps(event))) diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py index f81a18135..f33f25dc1 100644 --- a/src/common/tests/MockServicerImpl_Context.py +++ b/src/common/tests/MockServicerImpl_Context.py @@ -24,19 +24,13 @@ from common.proto.context_pb2 import ( Slice, SliceEvent, SliceId, SliceIdList, SliceList, Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList) from common.proto.context_pb2_grpc import ContextServiceServicer -from common.tests.MockMessageBroker import MockMessageBroker, notify_event +from common.tests.MockMessageBroker import ( + TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY, + MockMessageBroker, notify_event) from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string LOGGER = logging.getLogger(__name__) -TOPIC_CONNECTION = 'connection' -TOPIC_CONTEXT = 'context' -TOPIC_TOPOLOGY = 'topology' -TOPIC_DEVICE = 'device' -TOPIC_LINK = 'link' -TOPIC_SERVICE = 'service' -TOPIC_SLICE = 'slice' - def get_container(database : Dict[str, Dict[str, Any]], container_name : str) -> Dict[str, Any]: return database.setdefault(container_name, {}) -- GitLab From b47b86aad1756e0d6865ed5a45b06cb5594f206f Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 18:09:19 +0000 Subject: [PATCH 138/158] Monitoring component: - corrected MockContext in unitary test --- src/monitoring/tests/test_unitary.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index 60d15ef9d..28de8ca65 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -61,6 +61,8 @@ from device.service.drivers import DRIVERS # pylint: disable=wrong-import-posit LOCAL_HOST = '127.0.0.1' MOCKSERVICE_PORT = 10000 +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(MOCKSERVICE_PORT) DEVICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) @@ -88,14 +90,9 @@ class MockContextService(GenericGrpcService): self.context_servicer = MockServicerImpl_Context() add_ContextServiceServicer_to_server(self.context_servicer, self.server) - def configure_env_vars(self): - os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address) - os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port) - @pytest.fixture(scope='session') def context_service(): _service = MockContextService(MOCKSERVICE_PORT) - _service.configure_env_vars() _service.start() yield _service _service.stop() -- GitLab From 8b96e04c194fbf90354d89104945d241125b2c2c Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 25 Jan 2023 18:27:06 +0000 Subject: [PATCH 139/158] Monitoring component: - corrected per unitary test cleanup --- src/monitoring/tests/test_unitary.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index 28de8ca65..304539c4c 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -263,6 +263,8 @@ def test_monitor_kpi( LOGGER.debug(str(response)) assert isinstance(response, Empty) + device_client.DeleteDevice(response) + # Test case that makes use of client fixture to test server's QueryKpiData method def test_query_kpi_data(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name @@ -516,6 +518,7 @@ def test_events_tools( response = device_client.AddDevice(Device(**device_with_connect_rules)) assert response.device_uuid.uuid == DEVICE_DEV1_UUID + device_client.DeleteDevice(response) events_collector.stop() @@ -543,6 +546,7 @@ def test_get_device_events( assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE assert event.device_id.device_uuid.uuid == DEVICE_DEV1_UUID + device_client.DeleteDevice(response) events_collector.stop() def test_listen_events( @@ -563,9 +567,10 @@ def test_listen_events( response = device_client.AddDevice(Device(**device_with_connect_rules)) assert response.device_uuid.uuid == DEVICE_DEV1_UUID - sleep(0.1) + sleep(1.0) kpi_id_list = events_collector.listen_events() - assert len(kpi_id_list) > 0 + + device_client.DeleteDevice(response) events_collector.stop() -- GitLab From a19a4ae40d4fed77b3bdcd8fdbb65a9916e43a82 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 07:40:54 +0000 Subject: [PATCH 140/158] Monitoring component: - corrected test_monitor_kpi unitary test device removal --- src/monitoring/tests/test_unitary.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index 304539c4c..e70827cbc 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -254,8 +254,8 @@ def test_monitor_kpi( LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID)) device_with_connect_rules = copy.deepcopy(DEVICE_DEV1) device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES) - response = device_client.AddDevice(Device(**device_with_connect_rules)) - assert response.device_uuid.uuid == DEVICE_DEV1_UUID + device_id = device_client.AddDevice(Device(**device_with_connect_rules)) + assert device_id.device_uuid.uuid == DEVICE_DEV1_UUID response = monitoring_client.SetKpi(create_kpi_request('1')) _monitor_kpi_request = monitor_kpi_request(response.kpi_id.uuid, 120, 5) # pylint: disable=maybe-no-member @@ -263,7 +263,7 @@ def test_monitor_kpi( LOGGER.debug(str(response)) assert isinstance(response, Empty) - device_client.DeleteDevice(response) + device_client.DeleteDevice(device_id) # Test case that makes use of client fixture to test server's QueryKpiData method def test_query_kpi_data(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name -- GitLab From 7c92fdca6db0438145085d7b4e48214a4f030edf Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 07:41:43 +0000 Subject: [PATCH 141/158] PathComp component: - corrected CI/CD pipeline unit_test --- src/pathcomp/.gitlab-ci.yml | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/pathcomp/.gitlab-ci.yml b/src/pathcomp/.gitlab-ci.yml index 7658fcae9..7d664d8f5 100644 --- a/src/pathcomp/.gitlab-ci.yml +++ b/src/pathcomp/.gitlab-ci.yml @@ -102,22 +102,35 @@ unit_test pathcomp-frontend: - build pathcomp before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend image is not in the system"; fi - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend image is not in the system"; fi script: - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG" - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" - - docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 -v "$PWD/src/${IMAGE_NAME}/backend/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG + - > + docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 --network=teraflowbridge + --volume "$PWD/src/${IMAGE_NAME}/backend/tests:/opt/results" + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG + - PATHCOMP_BACKEND_HOST=$(docker inspect ${IMAGE_NAME}-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $PATHCOMP_BACKEND_HOST + - sleep 1 + - > + docker run --name ${IMAGE_NAME}-frontend -d -p 10020:10020 --network=teraflowbridge + --volume "$PWD/src/${IMAGE_NAME}/frontend/tests:/opt/results" + --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}" + --env "PATHCOMP_BACKEND_PORT=8081" + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG - sleep 1 - - docker run --name ${IMAGE_NAME}-frontend -d -p 10020:10020 --env "PATHCOMP_BACKEND_HOST=172.28.0.1" --env "PATHCOMP_BACKEND_PORT=8081" -v "$PWD/src/${IMAGE_NAME}/frontend/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG - docker exec -i ${IMAGE_NAME}-frontend bash -c "env" - docker exec -i ${IMAGE_NAME}-backend bash -c "env" - sleep 5 - docker ps -a - docker logs ${IMAGE_NAME}-frontend - docker logs ${IMAGE_NAME}-backend - - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/frontend/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml" + - > + docker exec -i ${IMAGE_NAME}-frontend bash -c + "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/frontend/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml" - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: -- GitLab From 7ebe1f6809fa791fa43b51ab3345b50c116da6d1 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 07:41:54 +0000 Subject: [PATCH 142/158] DLT component: - corrected CI/CD pipeline unit_test --- src/dlt/.gitlab-ci.yml | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/dlt/.gitlab-ci.yml b/src/dlt/.gitlab-ci.yml index 3c2013f50..5d9875ef9 100644 --- a/src/dlt/.gitlab-ci.yml +++ b/src/dlt/.gitlab-ci.yml @@ -109,20 +109,32 @@ unit test dlt-connector: - build dlt before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi - if docker container ls | grep ${IMAGE_NAME}-connector; then docker rm -f ${IMAGE_NAME}-connector; else echo "${IMAGE_NAME}-connector image is not in the system"; fi - if docker container ls | grep ${IMAGE_NAME}-gateway; then docker rm -f ${IMAGE_NAME}-gateway; else echo "${IMAGE_NAME}-gateway image is not in the system"; fi script: - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG" - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG" - - docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 -v "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG + - > + docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 --network=teraflowbridge + --volume "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results" + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG + - DLT_GATEWAY_HOST=$(docker inspect ${IMAGE_NAME}-gateway --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") + - echo $DLT_GATEWAY_HOST - sleep 1 - - docker run --name ${IMAGE_NAME}-connector -d -p 8080:8080 --env "DLT_GATEWAY_HOST=172.28.0.1" --env "DLT_GATEWAY_PORT=50051" -v "$PWD/src/${IMAGE_NAME}/connector/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG + - > + docker run --name ${IMAGE_NAME}-connector -d -p 8080:8080 --network=teraflowbridge + --volume "$PWD/src/${IMAGE_NAME}/connector/tests:/opt/results" + --env "DLT_GATEWAY_HOST=${DLT_GATEWAY_HOST}" + --env "DLT_GATEWAY_PORT=50051" + $CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG - sleep 5 - docker ps -a - docker logs ${IMAGE_NAME}-connector - docker logs ${IMAGE_NAME}-gateway - - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/connector/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-connector_report.xml" + - > + docker exec -i ${IMAGE_NAME}-connector bash -c + "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/connector/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-connector_report.xml" - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: -- GitLab From 6733b6c199526ab4a602bcd8b53f23540efc9bc7 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 07:58:50 +0000 Subject: [PATCH 143/158] PathComp component: - corrected CI/CD pipeline unit_test --- src/pathcomp/.gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pathcomp/.gitlab-ci.yml b/src/pathcomp/.gitlab-ci.yml index 7d664d8f5..787913539 100644 --- a/src/pathcomp/.gitlab-ci.yml +++ b/src/pathcomp/.gitlab-ci.yml @@ -62,6 +62,7 @@ unit_test pathcomp-backend: - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend image is not in the system"; fi script: - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG" + - docker ps -a #- docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 -v "$PWD/src/${IMAGE_NAME}/backend/tests:/opt/results" --network=teraflowbridge ${IMAGE_NAME}-backend:${IMAGE_TAG}-builder - docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 --network=teraflowbridge ${IMAGE_NAME}-backend:${IMAGE_TAG}-builder - sleep 5 -- GitLab From 903748da29ae587dce128f6ef375ae146ca475c3 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 15:09:53 +0000 Subject: [PATCH 144/158] Deploy scripts: - updated deploy scripts to include CockroachDB and NATS - reorganized deploy scripts in a new "deploy" folder --- deploy/all.sh | 118 ++++++ deploy/crdb.sh | 396 ++++++++++++++++++ .../deploy_component.sh | 15 +- deploy/nats.sh | 144 +++++++ show_deploy.sh => deploy/show.sh | 0 deploy.sh => deploy/tfs.sh | 36 +- manifests/cockroachdb/single-node.yaml | 84 ++++ my_deploy.sh | 71 +++- report_coverage_slice.sh | 3 - src/context/data/cleanup_commands.sql | 12 + 10 files changed, 837 insertions(+), 42 deletions(-) create mode 100644 deploy/all.sh create mode 100755 deploy/crdb.sh rename deploy_component.sh => deploy/deploy_component.sh (94%) create mode 100755 deploy/nats.sh rename show_deploy.sh => deploy/show.sh (100%) rename deploy.sh => deploy/tfs.sh (90%) create mode 100644 manifests/cockroachdb/single-node.yaml delete mode 100755 report_coverage_slice.sh create mode 100644 src/context/data/cleanup_commands.sql diff --git a/deploy/all.sh b/deploy/all.sh new file mode 100644 index 000000000..2be46a28b --- /dev/null +++ b/deploy/all.sh @@ -0,0 +1,118 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + +# If not already set, set the URL of the Docker registry where the images will be uploaded to. +# By default, assume internal MicroK8s registry is used. +export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} + +# If not already set, set the list of components you want to build images for, and deploy. +# By default, only basic components are deployed +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"} + +# If not already set, set the tag you want to use for your images. +export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +# If not already set, set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""} + +# If not already set, set the new Grafana admin password +export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} + +# If not already set, set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"} + +# If not already set, set the database username to be used by Context. +export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"} + +# If not already set, set the database user's password to be used by Context. +export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} + +# If not already set, set the database name to be used by Context. +export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} + +# If not already set, set the name of the secret where CockroachDB data and credentials will be stored. +export CRDB_SECRET_NAME=${CRDB_SECRET_NAME:-"crdb-data"} + +# If not already set, set the namespace where the secret containing CockroachDB data and credentials will be stored. +export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}} + +# If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'. +# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while +# checking/deploying CockroachDB. +# - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for +# development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS. +# - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster +# with 3 replicas and version v22.2 (set by default) will be deployed. It is convenient for production and +# provides scalability features. If you are deploying for production, also read the following link providing +# details on deploying CockroachDB for production environments: +# Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html +export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"} + +# If not already set, disable flag for dropping database if exists. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! +# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while +# checking/deploying CockroachDB. +export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""} + +# If not already set, disable flag for re-deploying CockroachDB from scratch. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! +# WARNING: THE REDEPLOY MIGHT TAKE FEW MINUTES TO COMPLETE GRACEFULLY IN CLUSTER MODE +# If CRDB_REDEPLOY is "YES", the database will be dropped while checking/deploying CockroachDB. +export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""} + +# If not already set, set the namespace where NATS will be deployed. +export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} + +# If not already set, set the name of the secret where NATS data and credentials will be stored. +export NATS_SECRET_NAME=${NATS_SECRET_NAME:-"nats-data"} + +# If not already set, set the namespace where the secret containing NATS data and credentials will be stored. +export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}} + +# If not already set, disable flag for re-deploying NATS from scratch. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION! +# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS. +export NATS_REDEPLOY=${NATS_REDEPLOY:-""} + + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +# Deploy CockroachDB +./deploy/deploy_crdb.sh + +# Deploy NATS +./deploy/deploy_nats.sh + +# Deploy TFS +./deploy/deploy_tfs.sh + +# Show deploy summary +./show_deploy.sh + +echo "Done!" diff --git a/deploy/crdb.sh b/deploy/crdb.sh new file mode 100755 index 000000000..c3cae9d40 --- /dev/null +++ b/deploy/crdb.sh @@ -0,0 +1,396 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + +# If not already set, set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"} + +# If not already set, set the database username to be used by Context. +export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"} + +# If not already set, set the database user's password to be used by Context. +export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} + +# If not already set, set the database name to be used by Context. +export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} + +# If not already set, set the name of the secret where CockroachDB data and credentials will be stored. +export CRDB_SECRET_NAME=${CRDB_SECRET_NAME:-"crdb-data"} + +# If not already set, set the namespace where the secret containing CockroachDB data and credentials will be stored. +export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-"tfs"} + +# If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'. +# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while +# checking/deploying CockroachDB. +# - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for +# development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS. +# - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster +# with 3 replicas and version v22.2 (set by default) will be deployed. It is convenient for production and +# provides scalability features. If you are deploying for production, also read the following link providing +# details on deploying CockroachDB for production environments: +# Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html +export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"} + +# If not already set, disable flag for dropping database if exists. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! +# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while +# checking/deploying CockroachDB. +export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""} + +# If not already set, disable flag for re-deploying CockroachDB from scratch. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION! +# WARNING: THE REDEPLOY MIGHT TAKE FEW MINUTES TO COMPLETE GRACEFULLY IN CLUSTER MODE +# If CRDB_REDEPLOY is "YES", the database will be dropped while checking/deploying CockroachDB. +export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""} + + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +# Constants +TMP_FOLDER="./tmp" +CRDB_MANIFESTS_PATH="manifests/cockroachdb" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" +TMP_LOGS_FOLDER="$TMP_FOLDER/logs" +CRDB_LOG_FILE="$TMP_LOGS_FOLDER/crdb_deploy.log" +mkdir -p $TMP_LOGS_FOLDER + +function crdb_deploy_single() { + echo "CockroachDB Namespace" + echo ">>> Create CockroachDB Namespace (if missing)" + kubectl create namespace ${CRDB_NAMESPACE} + echo + + echo "CockroachDB (single-node)" + echo ">>> Checking if CockroachDB is deployed..." + if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then + echo ">>> CockroachDB is present; skipping step." + else + echo ">>> Deploy CockroachDB" + cp "${CRDB_MANIFESTS_PATH}/single-node.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml" + sed -i "s/%CRDB_DATABASE%/${CRDB_DATABASE}/g" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml" + sed -i "s/%CRDB_USERNAME%/${CRDB_USERNAME}/g" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml" + sed -i "s/%CRDB_PASSWORD%/${CRDB_PASSWORD}/g" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml" + kubectl apply --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml" + + echo ">>> Waiting CockroachDB statefulset to be created..." + while ! kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; do + printf "%c" "." + sleep 1 + done + + # Wait for statefulset condition "Available=True" does not work + # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error: + # "error: readyReplicas is not found" + # Workaround: Check the pods are ready + #echo ">>> CockroachDB statefulset created. Waiting for readiness condition..." + #kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/cockroachdb + #kubectl wait --namespace ${CRDB_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \ + # statefulset/cockroachdb + echo ">>> CockroachDB statefulset created. Waiting CockroachDB pods to be created..." + while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-0 &> /dev/null; do + printf "%c" "." + sleep 1 + done + kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-0 + fi + echo + + echo "CockroachDB Port Mapping" + echo ">>> Expose CockroachDB SQL port (26257)" + CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') + PATCH='{"data": {"'${CRDB_SQL_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_SQL_PORT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${CRDB_SQL_PORT}', "hostPort": '${CRDB_SQL_PORT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo ">>> Expose CockroachDB HTTP Mgmt GUI port (8080)" + CRDB_GUI_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') + PATCH='{"data": {"'${CRDB_GUI_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_GUI_PORT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${CRDB_GUI_PORT}', "hostPort": '${CRDB_GUI_PORT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo "Create secret with CockroachDB data" + kubectl create secret generic ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --type='Opaque' \ + --from-literal=namespace=${CRDB_NAMESPACE} \ + --from-literal=sql_port=${CRDB_SQL_PORT} \ + --from-literal=gui_port=${CRDB_GUI_PORT} \ + --from-literal=database=${CRDB_DATABASE} \ + --from-literal=username=${CRDB_USERNAME} \ + --from-literal=password="'"${CRDB_PASSWORD}"'" \ + --from-literal=sslmode=require + + kubectl get all --all-namespaces +} + +function crdb_undeploy_single() { + echo "Delete secret with CockroachDB data" + kubectl delete secret ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --ignore-not-found + echo + + echo "CockroachDB" + echo ">>> Checking if CockroachDB is deployed..." + if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then + echo ">>> Undeploy CockroachDB" + kubectl delete --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml" --ignore-not-found + else + echo ">>> CockroachDB is not present; skipping step." + fi + echo + + echo "CockroachDB Namespace" + echo ">>> Delete CockroachDB Namespace (if exists)" + echo "NOTE: this step might take few minutes to complete!" + kubectl delete namespace ${CRDB_NAMESPACE} --ignore-not-found + echo +} + +function crdb_drop_database_single() { + echo "Drop database if exists" + CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@cockroachdb-0:${CRDB_SQL_PORT}/defaultdb?sslmode=require" + kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \ + ./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \ + --execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE};" + echo +} + +function crdb_deploy_cluster() { + echo "Cockroach Operator CRDs" + echo ">>> Apply Cockroach Operator CRDs (if they are missing)" + cp "${CRDB_MANIFESTS_PATH}/crds.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml" + kubectl apply -f "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml" + echo + + echo "Cockroach Operator" + echo ">>> Checking if Cockroach Operator is deployed..." + if kubectl get --namespace cockroach-operator-system deployment/cockroach-operator-manager &> /dev/null; then + echo ">>> Cockroach Operator is present; skipping step." + else + echo ">>> Deploy Cockroach Operator" + sed "s/%TFS_CRDB_NAMESPACE%/${CRDB_NAMESPACE}/g" "${CRDB_MANIFESTS_PATH}/operator.yaml" \ + > "${TMP_MANIFESTS_FOLDER}/crdb_operator.yaml" + kubectl apply -f "${TMP_MANIFESTS_FOLDER}/crdb_operator.yaml" + kubectl wait --namespace cockroach-operator-system --for=condition=Available=True --timeout=300s \ + deployment/cockroach-operator-manager + #kubectl wait --namespace cockroach-operator-system --for=jsonpath='{.status.readyReplicas}'=1 --timeout=300s \ + # deployment/cockroach-operator-manager + + echo ">>> Waiting for Cockroach Operator Webhock service..." + while ! kubectl get service cockroach-operator-webhook-service --namespace cockroach-operator-system &> /dev/null; do + printf "%c" "." + sleep 1 + done + WEBHOOK_SERVICE_DATA=$(kubectl get service cockroach-operator-webhook-service --namespace cockroach-operator-system -o json) + WEBHOOK_SERVICE_HOST=$(echo ${WEBHOOK_SERVICE_DATA} | jq -r '.spec.clusterIP') + WEBHOOK_SERVICE_PORT=$(echo ${WEBHOOK_SERVICE_DATA} | jq -r '.spec.ports[] | select(.targetPort==9443) | .port') + WEBHOOK_URL="https://${WEBHOOK_SERVICE_HOST}:${WEBHOOK_SERVICE_PORT}/mutate-crdb-cockroachlabs-com-v1alpha1-crdbcluster?timeout=10s" + while ! curl --insecure --header 'Content-Type: application/json' ${WEBHOOK_URL} &> /dev/null; do + printf "%c" "." + sleep 1 + done + fi + echo + + echo "CockroachDB Namespace" + echo ">>> Create CockroachDB Namespace (if missing)" + kubectl create namespace ${CRDB_NAMESPACE} + echo + + echo "CockroachDB" + echo ">>> Checking if CockroachDB is deployed..." + if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then + echo ">>> CockroachDB is present; skipping step." + else + echo ">>> Deploy CockroachDB" + cp "${CRDB_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_cluster.yaml" + kubectl apply --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_cluster.yaml" + + echo ">>> Waiting CockroachDB statefulset to be created..." + while ! kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; do + printf "%c" "." + sleep 1 + done + + # Wait for statefulset condition "Available=True" does not work + # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error: + # "error: readyReplicas is not found" + # Workaround: Check the pods are ready + #echo ">>> CockroachDB statefulset created. Waiting for readiness condition..." + #kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/cockroachdb + #kubectl wait --namespace ${CRDB_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \ + # statefulset/cockroachdb + echo ">>> CockroachDB statefulset created. Waiting CockroachDB pods to be created..." + while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-0 &> /dev/null; do + printf "%c" "." + sleep 1 + done + while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-1 &> /dev/null; do + printf "%c" "." + sleep 1 + done + while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-2 &> /dev/null; do + printf "%c" "." + sleep 1 + done + kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-0 + kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-1 + kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-2 + fi + echo + + echo "CockroachDB Client" + echo ">>> Checking if CockroachDB Client is deployed..." + if kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-client-secure &> /dev/null; then + echo ">>> CockroachDB Client is present; skipping step." + else + echo ">>> Deploy CockroachDB Client" + cp "${CRDB_MANIFESTS_PATH}/client-secure-operator.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_client-secure-operator.yaml" + kubectl create --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_client-secure-operator.yaml" + kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-client-secure + fi + echo + + echo "Add tfs user and grant admin rights" + kubectl exec -it cockroachdb-client-secure --namespace ${CRDB_NAMESPACE} -- \ + ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \ + "CREATE USER ${CRDB_USERNAME} WITH PASSWORD '${CRDB_PASSWORD}'; GRANT admin TO ${CRDB_USERNAME};" + echo + + echo "CockroachDB Port Mapping" + echo ">>> Expose CockroachDB SQL port (26257)" + CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') + PATCH='{"data": {"'${CRDB_SQL_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_SQL_PORT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${CRDB_SQL_PORT}', "hostPort": '${CRDB_SQL_PORT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo ">>> Expose CockroachDB HTTP Mgmt GUI port (8080)" + CRDB_GUI_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}') + PATCH='{"data": {"'${CRDB_GUI_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_GUI_PORT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${CRDB_GUI_PORT}', "hostPort": '${CRDB_GUI_PORT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo "Create secret with CockroachDB data" + kubectl create secret generic ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --type='Opaque' \ + --from-literal=namespace=${CRDB_NAMESPACE} \ + --from-literal=sql_port=${CRDB_SQL_PORT} \ + --from-literal=gui_port=${CRDB_GUI_PORT} \ + --from-literal=database=${CRDB_DATABASE} \ + --from-literal=username=${CRDB_USERNAME} \ + --from-literal=password="'"${CRDB_PASSWORD}"'" \ + --from-literal=sslmode=require + + kubectl get all --all-namespaces +} + +function crdb_undeploy_cluster() { + echo "Delete secret with CockroachDB data" + kubectl delete secret ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --ignore-not-found + echo + + echo "CockroachDB Client" + echo ">>> Checking if CockroachDB Client is deployed..." + if kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-client-secure &> /dev/null; then + echo ">>> Undeploy CockroachDB Client" + kubectl delete --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_client-secure-operator.yaml" \ + --ignore-not-found + else + echo ">>> CockroachDB Client is not present; skipping step." + fi + echo + + echo "CockroachDB" + echo ">>> Checking if CockroachDB is deployed..." + if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then + echo ">>> Undeploy CockroachDB" + kubectl delete --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_cluster.yaml" --ignore-not-found + else + echo ">>> CockroachDB is not present; skipping step." + fi + echo + + echo "CockroachDB Namespace" + echo ">>> Delete CockroachDB Namespace (if exists)" + echo "NOTE: this step might take few minutes to complete!" + kubectl delete namespace ${CRDB_NAMESPACE} --ignore-not-found + echo + + echo "CockroachDB Operator" + echo ">>> Checking if CockroachDB Operator is deployed..." + if kubectl get --namespace cockroach-operator-system deployment/cockroach-operator-manager &> /dev/null; then + echo ">>> Undeploy CockroachDB Operator" + kubectl delete -f "${TMP_MANIFESTS_FOLDER}/crdb_operator.yaml" --ignore-not-found + else + echo ">>> CockroachDB Operator is not present; skipping step." + fi + echo + + echo "CockroachDB Operator CRDs" + echo ">>> Delete CockroachDB Operator CRDs (if they exist)" + kubectl delete -f "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml" --ignore-not-found + echo +} + +function crdb_drop_database_cluster() { + echo "Drop database if exists" + kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \ + ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \ + "DROP DATABASE IF EXISTS ${CRDB_DATABASE};" + echo +} + +if [ "$CRDB_DEPLOY_MODE" == "single" ]; then + if [ "$CRDB_REDEPLOY" == "YES" ]; then + crdb_undeploy_single + elif [ "$CRDB_DROP_DATABASE_IF_EXISTS" == "YES" ]; then + crdb_drop_database_single + fi + crdb_deploy_single +elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then + if [ "$CRDB_REDEPLOY" == "YES" ]; then + crdb_undeploy_cluster + elif [ "$CRDB_DROP_DATABASE_IF_EXISTS" == "YES" ]; then + crdb_drop_database_cluster + fi + crdb_deploy_cluster +else + echo "Unsupported value: CRDB_DEPLOY_MODE=$CRDB_DEPLOY_MODE" +fi diff --git a/deploy_component.sh b/deploy/deploy_component.sh similarity index 94% rename from deploy_component.sh rename to deploy/deploy_component.sh index a4cf6184c..f3cbddd8f 100755 --- a/deploy_component.sh +++ b/deploy/deploy_component.sh @@ -18,10 +18,9 @@ # Read deployment settings ######################################################################################################################## -# If not already set, set the URL of your local Docker registry where the images will be uploaded to. -# Leave it blank if you do not want to use any Docker registry. -export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""} -#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/" +# If not already set, set the URL of the Docker registry where the images will be uploaded to. +# By default, assume internal MicroK8s registry is used. +export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} TFS_COMPONENTS=$1 @@ -55,7 +54,7 @@ ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh for COMPONENT in $TFS_COMPONENTS; do echo "Processing '$COMPONENT' component..." IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g') echo " Building Docker image..." BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" @@ -74,8 +73,8 @@ for COMPONENT in $TFS_COMPONENTS; do docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" fi - if [ -n "$TFS_REGISTRY_IMAGE" ]; then - echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." + if [ -n "$TFS_REGISTRY_IMAGES" ]; then + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." if [ "$COMPONENT" == "pathcomp" ]; then TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" @@ -102,7 +101,7 @@ for COMPONENT in $TFS_COMPONENTS; do MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml" cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" - if [ -n "$TFS_REGISTRY_IMAGE" ]; then + if [ -n "$TFS_REGISTRY_IMAGES" ]; then # Registry is set if [ "$COMPONENT" == "pathcomp" ]; then VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) diff --git a/deploy/nats.sh b/deploy/nats.sh new file mode 100755 index 000000000..affae0b08 --- /dev/null +++ b/deploy/nats.sh @@ -0,0 +1,144 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + +# If not already set, set the namespace where NATS will be deployed. +export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} + +# If not already set, set the name of the secret where NATS data and credentials will be stored. +export NATS_SECRET_NAME=${NATS_SECRET_NAME:-"nats-data"} + +# If not already set, set the namespace where the secret containing NATS data and credentials will be stored. +export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-"tfs"} + +# If not already set, disable flag for re-deploying NATS from scratch. +# WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION! +# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS. +export NATS_REDEPLOY=${NATS_REDEPLOY:-""} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +# Constants +TMP_FOLDER="./tmp" +NATS_MANIFESTS_PATH="manifests/nats" + +# Create a tmp folder for files modified during the deployment +TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests" +mkdir -p $TMP_MANIFESTS_FOLDER + +function nats_deploy_single() { + echo "NATS Namespace" + echo ">>> Create NATS Namespace (if missing)" + kubectl create namespace ${NATS_NAMESPACE} + echo + + echo "Add NATS Helm Chart" + helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/ + echo + + echo "Install NATS (single-node)" + echo ">>> Checking if NATS is deployed..." + if kubectl get --namespace ${NATS_NAMESPACE} statefulset/nats &> /dev/null; then + echo ">>> NATS is present; skipping step." + else + echo ">>> Deploy NATS" + helm3 install nats nats/nats --namespace ${NATS_NAMESPACE} --set nats.image.tag=2.9-alpine + + echo ">>> Waiting NATS statefulset to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/nats &> /dev/null; do + printf "%c" "." + sleep 1 + done + + # Wait for statefulset condition "Available=True" does not work + # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error: + # "error: readyReplicas is not found" + # Workaround: Check the pods are ready + #echo ">>> NATS statefulset created. Waiting for readiness condition..." + #kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats + #kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \ + # statefulset/nats + echo ">>> NATS statefulset created. Waiting NATS pods to be created..." + while ! kubectl get --namespace ${NATS_NAMESPACE} pod/nats-0 &> /dev/null; do + printf "%c" "." + sleep 1 + done + kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/nats-0 + fi + echo + + echo "NATS Port Mapping" + echo ">>> Expose NATS Client port (4222)" + NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') + PATCH='{"data": {"'${NATS_CLIENT_PORT}'": "'${NATS_NAMESPACE}'/nats:'${NATS_CLIENT_PORT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_CLIENT_PORT}', "hostPort": '${NATS_CLIENT_PORT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo ">>> Expose NATS HTTP Mgmt GUI port (8222)" + NATS_GUI_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}') + PATCH='{"data": {"'${NATS_GUI_PORT}'": "'${NATS_NAMESPACE}'/nats:'${NATS_GUI_PORT}'"}}' + kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}" + + PORT_MAP='{"containerPort": '${NATS_GUI_PORT}', "hostPort": '${NATS_GUI_PORT}'}' + CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}' + PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' + kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" + echo + + echo "Create secret with NATS data" + kubectl create secret generic ${NATS_SECRET_NAME} --namespace ${NATS_SECRET_NAMESPACE} --type='Opaque' \ + --from-literal=namespace=${NATS_NAMESPACE} \ + --from-literal=client_port=${NATS_CLIENT_PORT} \ + --from-literal=gui_port=${NATS_GUI_PORT} + + kubectl get all --all-namespaces +} + +function nats_undeploy_single() { + echo "Delete secret with NATS data" + kubectl delete secret ${NATS_SECRET_NAME} --namespace ${NATS_SECRET_NAMESPACE} --ignore-not-found + echo + + echo "NATS" + echo ">>> Checking if NATS is deployed..." + if kubectl get --namespace ${NATS_NAMESPACE} statefulset/nats &> /dev/null; then + echo ">>> Undeploy NATS" + helm3 uninstall --namespace ${NATS_NAMESPACE} nats + else + echo ">>> NATS is not present; skipping step." + fi + echo + + echo "NATS Namespace" + echo ">>> Delete NATS Namespace (if exists)" + kubectl delete namespace ${NATS_NAMESPACE} --ignore-not-found + echo +} + +if [ "$NATS_REDEPLOY" == "YES" ]; then + nats_undeploy_single +fi + +nats_deploy_single diff --git a/show_deploy.sh b/deploy/show.sh similarity index 100% rename from show_deploy.sh rename to deploy/show.sh diff --git a/deploy.sh b/deploy/tfs.sh similarity index 90% rename from deploy.sh rename to deploy/tfs.sh index c62778417..e017ce352 100755 --- a/deploy.sh +++ b/deploy/tfs.sh @@ -18,10 +18,9 @@ # Read deployment settings ######################################################################################################################## -# If not already set, set the URL of your local Docker registry where the images will be uploaded to. -# Leave it blank if you do not want to use any Docker registry. -export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""} -#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/" +# If not already set, set the URL of the Docker registry where the images will be uploaded to. +# By default, assume internal MicroK8s registry is used. +export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} # If not already set, set the list of components you want to build images for, and deploy. # By default, only basic components are deployed @@ -96,11 +95,11 @@ for COMPONENT in $TFS_COMPONENTS; do docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" fi - if [ -n "$TFS_REGISTRY_IMAGE" ]; then - echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." + if [ -n "$TFS_REGISTRY_IMAGES" ]; then + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." if [ "$COMPONENT" == "pathcomp" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" @@ -108,7 +107,7 @@ for COMPONENT in $TFS_COMPONENTS; do PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" docker push "$IMAGE_URL" > "$PUSH_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" @@ -116,7 +115,7 @@ for COMPONENT in $TFS_COMPONENTS; do PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" docker push "$IMAGE_URL" > "$PUSH_LOG" elif [ "$COMPONENT" == "dlt" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" @@ -124,7 +123,7 @@ for COMPONENT in $TFS_COMPONENTS; do PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" docker push "$IMAGE_URL" > "$PUSH_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" @@ -132,7 +131,7 @@ for COMPONENT in $TFS_COMPONENTS; do PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" docker push "$IMAGE_URL" > "$PUSH_LOG" else - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" @@ -147,26 +146,26 @@ for COMPONENT in $TFS_COMPONENTS; do MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml" cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" - if [ -n "$TFS_REGISTRY_IMAGE" ]; then + if [ -n "$TFS_REGISTRY_IMAGES" ]; then # Registry is set if [ "$COMPONENT" == "pathcomp" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" elif [ "$COMPONENT" == "dlt" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" else - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" fi @@ -345,6 +344,3 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring" printf "\n\n" fi -./show_deploy.sh - -echo "Done!" diff --git a/manifests/cockroachdb/single-node.yaml b/manifests/cockroachdb/single-node.yaml new file mode 100644 index 000000000..f207d2594 --- /dev/null +++ b/manifests/cockroachdb/single-node.yaml @@ -0,0 +1,84 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: cockroachdb-public + labels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: cockroachdb + app.kubernetes.io/name: cockroachdb +spec: + type: ClusterIP + selector: + app.kubernetes.io/component: database + app.kubernetes.io/instance: cockroachdb + app.kubernetes.io/name: cockroachdb + ports: + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + - name: sql + port: 26257 + protocol: TCP + targetPort: 26257 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb +spec: + selector: + matchLabels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: cockroachdb + app.kubernetes.io/name: cockroachdb + serviceName: "cockroachdb-public" + replicas: 1 + minReadySeconds: 5 + template: + metadata: + labels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: cockroachdb + app.kubernetes.io/name: cockroachdb + spec: + terminationGracePeriodSeconds: 10 + restartPolicy: Always + containers: + - name: cockroachdb + image: cockroachdb/cockroach:latest-v22.2 + args: + - start-single-node + ports: + - containerPort: 8080 + name: http + - containerPort: 26257 + name: sql + env: + - name: COCKROACH_DATABASE + value: "%CRDB_DATABASE%" + - name: COCKROACH_USER + value: "%CRDB_USERNAME%" + - name: COCKROACH_PASSWORD + value: "%CRDB_PASSWORD%" + resources: + requests: + cpu: "250m" + memory: 1Gi + limits: + cpu: "1" + memory: 2Gi diff --git a/my_deploy.sh b/my_deploy.sh index ffd91da35..41dce9bd5 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -1,18 +1,28 @@ -# Set the URL of your local Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} # Set the list of components, separated by spaces, you want to build images for, and deploy. -# Supported components are: -# context device automation policy service compute monitoring webui -# interdomain slice pathcomp dlt -# dbscanserving opticalattackmitigator opticalattackdetector -# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" -# Set the name of the Kubernetes namespace to deploy to. +# Set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE="tfs" # Set additional manifest files to be applied after the deployment @@ -21,6 +31,45 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" # Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" -# If not already set, disable skip-build flag. -# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. -export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set the database name to be used by Context. +export CRDB_DATABASE="tfs" + +# Set the name of the secret where CockroachDB data and credentials will be stored. +export CRDB_SECRET_NAME="crdb-data" + +# Set the namespace where the secret containing CockroachDB data and credentials will be stored. +export CRDB_SECRET_NAMESPACE=${TFS_K8S_NAMESPACE} + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if exists. +export CRDB_DROP_DATABASE_IF_EXISTS="" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the name of the secret where NATS data and credentials will be stored. +export NATS_SECRET_NAME="nats-data" + +# Set the namespace where the secret containing NATS data and credentials will be stored. +export NATS_SECRET_NAMESPACE=${TFS_K8S_NAMESPACE} + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY=${NATS_REDEPLOY:-""} diff --git a/report_coverage_slice.sh b/report_coverage_slice.sh deleted file mode 100755 index f783ec069..000000000 --- a/report_coverage_slice.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -./report_coverage_all.sh | grep --color -E -i "^slice/.*$|$" diff --git a/src/context/data/cleanup_commands.sql b/src/context/data/cleanup_commands.sql new file mode 100644 index 000000000..00a522d85 --- /dev/null +++ b/src/context/data/cleanup_commands.sql @@ -0,0 +1,12 @@ +USE tfs; + +DELETE FROM policyrule WHERE 1=1; +DELETE FROM slice WHERE 1=1; +DELETE FROM connection WHERE 1=1; +DELETE FROM service WHERE 1=1; + +DELETE FROM link WHERE 1=1; +DELETE FROM endpoint WHERE 1=1; +DELETE FROM device WHERE 1=1; +DELETE FROM topology WHERE 1=1; +DELETE FROM context WHERE 1=1; -- GitLab From bfa7d1ee997856a05a813ade33ccef526f13b1c8 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 15:11:36 +0000 Subject: [PATCH 145/158] Deploy scripts: - reorganized deploy scripts in a new "deploy" folder --- deploy/{deploy_component.sh => component.sh} | 0 deploy_mock_blockchain.sh => deploy/mock_blockchain.sh | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename deploy/{deploy_component.sh => component.sh} (100%) rename deploy_mock_blockchain.sh => deploy/mock_blockchain.sh (100%) diff --git a/deploy/deploy_component.sh b/deploy/component.sh similarity index 100% rename from deploy/deploy_component.sh rename to deploy/component.sh diff --git a/deploy_mock_blockchain.sh b/deploy/mock_blockchain.sh similarity index 100% rename from deploy_mock_blockchain.sh rename to deploy/mock_blockchain.sh -- GitLab From 7fcf140701843f071c690b8952302e114de3d586 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 15:39:39 +0000 Subject: [PATCH 146/158] Readme, Manifests and Deploy scripts: - updated old gitlab.com URLs by new labs.etsi.org URLs. - updated Context to use secrets generated by CockroachDB and NATS --- README.md | 9 +++--- deploy/component.sh | 2 +- deploy/crdb.sh | 28 +++++++++---------- deploy/mock_blockchain.sh | 2 +- deploy/nats.sh | 6 ++-- deploy/tfs.sh | 2 +- manifests/computeservice.yaml | 2 +- manifests/contextservice.yaml | 13 +++++---- manifests/dbscanservingservice.yaml | 2 +- manifests/deviceservice.yaml | 2 +- manifests/dltservice.yaml | 4 +-- manifests/interdomainservice.yaml | 2 +- manifests/l3_attackmitigatorservice.yaml | 2 +- .../l3_centralizedattackdetectorservice.yaml | 2 +- .../l3_distributedattackdetectorservice.yaml | 2 +- manifests/load_generatorservice.yaml | 2 +- manifests/mock_blockchain.yaml | 2 +- manifests/monitoringservice.yaml | 2 +- manifests/opticalattackmitigatorservice.yaml | 2 +- ...ticalcentralizedattackdetectorservice.yaml | 2 +- manifests/pathcompservice.yaml | 4 +-- manifests/serviceservice.yaml | 2 +- manifests/sliceservice.yaml | 2 +- manifests/webuiservice.yaml | 2 +- scripts/old/deploy_in_kubernetes.sh | 2 +- .../src/main/resources/application.yml | 4 +-- .../target/kubernetes/kubernetes.yml | 2 +- .../backend/nats/NatsBackend.py | 9 +++++- src/context/service/database/Engine.py | 11 +++++++- src/policy/src/main/resources/application.yml | 4 +-- src/policy/target/kubernetes/kubernetes.yml | 2 +- src/tests/oeccpsc22/deploy_in_kubernetes.sh | 2 +- 32 files changed, 77 insertions(+), 59 deletions(-) diff --git a/README.md b/README.md index 0336b9f6c..67f6895ce 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,11 @@ # TeraFlowSDN Controller -[Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows +[ETSI OpenSource Group for TeraFlowSDN](https://tfs.etsi.org/) +Former, [Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows -Branch "master" : [](https://gitlab.com/teraflow-h2020/controller/-/commits/master) [](https://gitlab.com/teraflow-h2020/controller/-/commits/master) +Branch "master" : [](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [](https://labs.etsi.org/rep/tfs/controller/-/commits/master) -Branch "develop" : [](https://gitlab.com/teraflow-h2020/controller/-/commits/develop) [](https://gitlab.com/teraflow-h2020/controller/-/commits/develop) +Branch "develop" : [](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) [](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) # Installation Instructions -For devel and upcoming release 2.0, we have prepared the following tutorial: [TeraFlowSDN tutorial](https://gitlab.com/teraflow-h2020/controller/-/tree/develop/tutorial). +For devel and upcoming release 2.0, check the Wiki pages: [TeraFlowSDN Wiki](https://labs.etsi.org/rep/tfs/controller/-/wikis/home). diff --git a/deploy/component.sh b/deploy/component.sh index f3cbddd8f..443bee601 100755 --- a/deploy/component.sh +++ b/deploy/component.sh @@ -41,7 +41,7 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} ######################################################################################################################## # Constants -GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller" +GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller" TMP_FOLDER="./tmp" # Create a tmp folder for files modified during the deployment diff --git a/deploy/crdb.sh b/deploy/crdb.sh index c3cae9d40..ecec39101 100755 --- a/deploy/crdb.sh +++ b/deploy/crdb.sh @@ -141,13 +141,13 @@ function crdb_deploy_single() { echo "Create secret with CockroachDB data" kubectl create secret generic ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --type='Opaque' \ - --from-literal=namespace=${CRDB_NAMESPACE} \ - --from-literal=sql_port=${CRDB_SQL_PORT} \ - --from-literal=gui_port=${CRDB_GUI_PORT} \ - --from-literal=database=${CRDB_DATABASE} \ - --from-literal=username=${CRDB_USERNAME} \ - --from-literal=password="'"${CRDB_PASSWORD}"'" \ - --from-literal=sslmode=require + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_GUI_PORT=${CRDB_GUI_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD="'"${CRDB_PASSWORD}"'" \ + --from-literal=CRDB_SSLMODE=require kubectl get all --all-namespaces } @@ -310,13 +310,13 @@ function crdb_deploy_cluster() { echo "Create secret with CockroachDB data" kubectl create secret generic ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --type='Opaque' \ - --from-literal=namespace=${CRDB_NAMESPACE} \ - --from-literal=sql_port=${CRDB_SQL_PORT} \ - --from-literal=gui_port=${CRDB_GUI_PORT} \ - --from-literal=database=${CRDB_DATABASE} \ - --from-literal=username=${CRDB_USERNAME} \ - --from-literal=password="'"${CRDB_PASSWORD}"'" \ - --from-literal=sslmode=require + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_GUI_PORT=${CRDB_GUI_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD="'"${CRDB_PASSWORD}"'" \ + --from-literal=CRDB_SSLMODE=require kubectl get all --all-namespaces } diff --git a/deploy/mock_blockchain.sh b/deploy/mock_blockchain.sh index 066820fc0..f741f069f 100755 --- a/deploy/mock_blockchain.sh +++ b/deploy/mock_blockchain.sh @@ -34,7 +34,7 @@ COMPONENT="mock_blockchain" ######################################################################################################################## # Constants -GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller" +GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller" TMP_FOLDER="./tmp" # Create a tmp folder for files modified during the deployment diff --git a/deploy/nats.sh b/deploy/nats.sh index affae0b08..757b0984f 100755 --- a/deploy/nats.sh +++ b/deploy/nats.sh @@ -109,9 +109,9 @@ function nats_deploy_single() { echo "Create secret with NATS data" kubectl create secret generic ${NATS_SECRET_NAME} --namespace ${NATS_SECRET_NAMESPACE} --type='Opaque' \ - --from-literal=namespace=${NATS_NAMESPACE} \ - --from-literal=client_port=${NATS_CLIENT_PORT} \ - --from-literal=gui_port=${NATS_GUI_PORT} + --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \ + --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT} \ + --from-literal=NATS_GUI_PORT=${NATS_GUI_PORT} kubectl get all --all-namespaces } diff --git a/deploy/tfs.sh b/deploy/tfs.sh index e017ce352..efa4875c6 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -47,7 +47,7 @@ export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} ######################################################################################################################## # Constants -GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller" +GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller" TMP_FOLDER="./tmp" # Create a tmp folder for files modified during the deployment diff --git a/manifests/computeservice.yaml b/manifests/computeservice.yaml index 0c8d0a672..89a4a39e5 100644 --- a/manifests/computeservice.yaml +++ b/manifests/computeservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/compute:latest + image: labs.etsi.org:5050/tfs/controller/compute:latest imagePullPolicy: Always ports: - containerPort: 8080 diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index f5844d81b..3bb1a01d9 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -29,20 +29,21 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/context:latest + image: labs.etsi.org:5050/tfs/controller/context:latest imagePullPolicy: Always ports: - containerPort: 1010 - containerPort: 9192 env: - - name: CRDB_URI - value: "cockroachdb://tfs:tfs123@cockroachdb-public.crdb.svc.cluster.local:26257/tfs?sslmode=require" - name: MB_BACKEND - value: "inmemory" - #- name: NATS_URI - # value: "nats://tfs:tfs123@nats-public.nats.svc.cluster.local:4222" + value: "nats" - name: LOG_LEVEL value: "DEBUG" + envFrom: + - secretRef: + name: crdb-data + - secretRef: + name: nats-data readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:1010"] diff --git a/manifests/dbscanservingservice.yaml b/manifests/dbscanservingservice.yaml index 9553ed556..e1f73a237 100644 --- a/manifests/dbscanservingservice.yaml +++ b/manifests/dbscanservingservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/dbscanserving:latest + image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest imagePullPolicy: Always ports: - containerPort: 10006 diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 960096b93..5c72263eb 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -29,7 +29,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/device:latest + image: labs.etsi.org:5050/tfs/controller/device:latest imagePullPolicy: Always ports: - containerPort: 2020 diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index 0f6b5bb9d..c067960b7 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: connector - image: registry.gitlab.com/teraflow-h2020/controller/dlt-connector:latest + image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest imagePullPolicy: Always ports: - containerPort: 8080 @@ -55,7 +55,7 @@ spec: cpu: 500m memory: 512Mi - name: gateway - image: registry.gitlab.com/teraflow-h2020/controller/dlt-gateway:latest + image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest imagePullPolicy: Always ports: - containerPort: 50051 diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index b275035f6..b21434361 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/interdomain:latest + image: labs.etsi.org:5050/tfs/controller/interdomain:latest imagePullPolicy: Always ports: - containerPort: 10010 diff --git a/manifests/l3_attackmitigatorservice.yaml b/manifests/l3_attackmitigatorservice.yaml index 2240776eb..592143089 100644 --- a/manifests/l3_attackmitigatorservice.yaml +++ b/manifests/l3_attackmitigatorservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/l3_attackmitigator:latest + image: labs.etsi.org:5050/tfs/controller/l3_attackmitigator:latest imagePullPolicy: Always ports: - containerPort: 10002 diff --git a/manifests/l3_centralizedattackdetectorservice.yaml b/manifests/l3_centralizedattackdetectorservice.yaml index fa7ee9dcc..8672cab95 100644 --- a/manifests/l3_centralizedattackdetectorservice.yaml +++ b/manifests/l3_centralizedattackdetectorservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/l3_centralizedattackdetector:latest + image: labs.etsi.org:5050/tfs/controller/l3_centralizedattackdetector:latest imagePullPolicy: Always ports: - containerPort: 10001 diff --git a/manifests/l3_distributedattackdetectorservice.yaml b/manifests/l3_distributedattackdetectorservice.yaml index 6b28f68dd..8765b7171 100644 --- a/manifests/l3_distributedattackdetectorservice.yaml +++ b/manifests/l3_distributedattackdetectorservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/l3_distributedattackdetector:latest + image: labs.etsi.org:5050/tfs/controller/l3_distributedattackdetector:latest imagePullPolicy: Always ports: - containerPort: 10000 diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml index 88b1fa397..4d7b32d1b 100644 --- a/manifests/load_generatorservice.yaml +++ b/manifests/load_generatorservice.yaml @@ -29,7 +29,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/load_generator:latest + image: labs.etsi.org:5050/tfs/controller/load_generator:latest imagePullPolicy: Always ports: - containerPort: 50052 diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml index bf9abac70..17b32a47e 100644 --- a/manifests/mock_blockchain.yaml +++ b/manifests/mock_blockchain.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/mock_blockchain:latest + image: labs.etsi.org:5050/tfs/controller/mock_blockchain:latest imagePullPolicy: Always ports: - containerPort: 50051 diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml index aed8d1c51..b5f3042ba 100644 --- a/manifests/monitoringservice.yaml +++ b/manifests/monitoringservice.yaml @@ -66,7 +66,7 @@ spec: restartPolicy: Always containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/monitoring:latest + image: labs.etsi.org:5050/tfs/controller/monitoring:latest imagePullPolicy: Always ports: - name: grpc diff --git a/manifests/opticalattackmitigatorservice.yaml b/manifests/opticalattackmitigatorservice.yaml index afe2e4069..0252eec21 100644 --- a/manifests/opticalattackmitigatorservice.yaml +++ b/manifests/opticalattackmitigatorservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/opticalattackmitigator:latest + image: labs.etsi.org:5050/tfs/controller/opticalattackmitigator:latest imagePullPolicy: Always ports: - containerPort: 10007 diff --git a/manifests/opticalcentralizedattackdetectorservice.yaml b/manifests/opticalcentralizedattackdetectorservice.yaml index 664bcb543..4a49f8b13 100644 --- a/manifests/opticalcentralizedattackdetectorservice.yaml +++ b/manifests/opticalcentralizedattackdetectorservice.yaml @@ -28,7 +28,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/opticalcentralizedattackdetector:latest + image: labs.etsi.org:5050/tfs/controller/opticalcentralizedattackdetector:latest imagePullPolicy: Always ports: - containerPort: 10005 diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index e9b890e76..4f7a65c45 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -29,7 +29,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: frontend - image: registry.gitlab.com/teraflow-h2020/controller/pathcomp-frontend:latest + image: labs.etsi.org:5050/tfs/controller/pathcomp-frontend:latest imagePullPolicy: Always ports: - containerPort: 10020 @@ -51,7 +51,7 @@ spec: cpu: 500m memory: 512Mi - name: backend - image: registry.gitlab.com/teraflow-h2020/controller/pathcomp-backend:latest + image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest imagePullPolicy: Always #readinessProbe: # httpGet: diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index b24bf13f0..561b204cb 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -29,7 +29,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/service:latest + image: labs.etsi.org:5050/tfs/controller/service:latest imagePullPolicy: Always ports: - containerPort: 3030 diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index 375344a97..ca7641de3 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -29,7 +29,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/slice:latest + image: labs.etsi.org:5050/tfs/controller/slice:latest imagePullPolicy: Always ports: - containerPort: 4040 diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index dd8004ad8..3828ee419 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -32,7 +32,7 @@ spec: - 0 containers: - name: server - image: registry.gitlab.com/teraflow-h2020/controller/webui:latest + image: labs.etsi.org:5050/tfs/controller/webui:latest imagePullPolicy: Always ports: - containerPort: 8004 diff --git a/scripts/old/deploy_in_kubernetes.sh b/scripts/old/deploy_in_kubernetes.sh index 89f45a548..c85354137 100755 --- a/scripts/old/deploy_in_kubernetes.sh +++ b/scripts/old/deploy_in_kubernetes.sh @@ -43,7 +43,7 @@ export EXTRA_MANIFESTS=${EXTRA_MANIFESTS:-""} ######################################################################################################################## # Constants -GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller" +GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller" TMP_FOLDER="./tmp" # Create a tmp folder for files modified during the deployment diff --git a/src/automation/src/main/resources/application.yml b/src/automation/src/main/resources/application.yml index bc89d4348..62cf8fc8e 100644 --- a/src/automation/src/main/resources/application.yml +++ b/src/automation/src/main/resources/application.yml @@ -33,9 +33,9 @@ quarkus: port: 8080 container-image: - group: teraflow-h2020 + group: tfs name: controller/automation - registry: registry.gitlab.com + registry: labs.etsi.org:5050 kubernetes: name: automationservice diff --git a/src/automation/target/kubernetes/kubernetes.yml b/src/automation/target/kubernetes/kubernetes.yml index 8bc14b935..f4f1c7dae 100644 --- a/src/automation/target/kubernetes/kubernetes.yml +++ b/src/automation/target/kubernetes/kubernetes.yml @@ -52,7 +52,7 @@ spec: value: contextservice - name: DEVICE_SERVICE_HOST value: deviceservice - image: registry.gitlab.com/teraflow-h2020/controller/automation:0.2.0 + image: labs.etsi.org:5050/tfs/controller/automation:0.2.0 imagePullPolicy: Always livenessProbe: failureThreshold: 3 diff --git a/src/common/message_broker/backend/nats/NatsBackend.py b/src/common/message_broker/backend/nats/NatsBackend.py index 197bc8633..6c644a0a8 100644 --- a/src/common/message_broker/backend/nats/NatsBackend.py +++ b/src/common/message_broker/backend/nats/NatsBackend.py @@ -20,10 +20,17 @@ from .._Backend import _Backend from .NatsBackendThread import NatsBackendThread DEFAULT_NATS_URI = 'nats://127.0.0.1:4222' +#NATS_URI_TEMPLATE = 'nats://{:s}:{:s}@nats.{:s}.svc.cluster.local:{:s}' # with authentication +NATS_URI_TEMPLATE = 'nats://nats.{:s}.svc.cluster.local:{:s}' class NatsBackend(_Backend): def __init__(self, **settings) -> None: # pylint: disable=super-init-not-called - nats_uri = get_setting('NATS_URI', settings=settings, default=DEFAULT_NATS_URI) + nats_namespace = get_setting('NATS_NAMESPACE', settings=settings) + nats_client_port = get_setting('NATS_CLIENT_PORT', settings=settings) + if nats_namespace is None or nats_client_port is None: + nats_uri = get_setting('NATS_URI', settings=settings, default=DEFAULT_NATS_URI) + else: + nats_uri = NATS_URI_TEMPLATE.format(nats_namespace, nats_client_port) self._terminate = threading.Event() self._nats_backend_thread = NatsBackendThread(nats_uri) self._nats_backend_thread.start() diff --git a/src/context/service/database/Engine.py b/src/context/service/database/Engine.py index c507efc72..a37ec0c1e 100644 --- a/src/context/service/database/Engine.py +++ b/src/context/service/database/Engine.py @@ -19,11 +19,20 @@ LOGGER = logging.getLogger(__name__) APP_NAME = 'tfs' ECHO = False # true: dump SQL commands and transactions executed +CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' class Engine: @staticmethod def get_engine() -> sqlalchemy.engine.Engine: - crdb_uri = get_setting('CRDB_URI') + CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE') + CRDB_SQL_PORT = get_setting('CRDB_SQL_PORT') + CRDB_DATABASE = get_setting('CRDB_DATABASE') + CRDB_USERNAME = get_setting('CRDB_USERNAME') + CRDB_PASSWORD = get_setting('CRDB_PASSWORD') + CRDB_SSLMODE = get_setting('CRDB_SSLMODE') + + crdb_uri = CRDB_URI_TEMPLATE.format( + CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) try: engine = sqlalchemy.create_engine( diff --git a/src/policy/src/main/resources/application.yml b/src/policy/src/main/resources/application.yml index 3d9927634..fa7dff7b2 100644 --- a/src/policy/src/main/resources/application.yml +++ b/src/policy/src/main/resources/application.yml @@ -34,9 +34,9 @@ quarkus: port: 8080 container-image: - group: teraflow-h2020 + group: tfs name: controller/policy - registry: registry.gitlab.com + registry: labs.etsi.org:5050 kubernetes: name: policyservice diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml index 1a2b4e26c..51ed10e4a 100644 --- a/src/policy/target/kubernetes/kubernetes.yml +++ b/src/policy/target/kubernetes/kubernetes.yml @@ -57,7 +57,7 @@ spec: value: contextservice - name: SERVICE_SERVICE_HOST value: serviceservice - image: registry.gitlab.com/teraflow-h2020/controller/policy:0.1.0 + image: labs.etsi.org:5050/tfs/controller/policy:0.1.0 imagePullPolicy: Always livenessProbe: failureThreshold: 3 diff --git a/src/tests/oeccpsc22/deploy_in_kubernetes.sh b/src/tests/oeccpsc22/deploy_in_kubernetes.sh index 426e07e13..fffce0b76 100755 --- a/src/tests/oeccpsc22/deploy_in_kubernetes.sh +++ b/src/tests/oeccpsc22/deploy_in_kubernetes.sh @@ -22,7 +22,7 @@ export K8S_HOSTNAME="kubernetes-master" #export GRAFANA_PASSWORD="admin123+" # Constants -GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller" +GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller" TMP_FOLDER="./tmp" # Create a tmp folder for files modified during the deployment -- GitLab From 5bcdcd6d1d56a5323250271f628521538f793a5d Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 15:41:29 +0000 Subject: [PATCH 147/158] Deploy scripts: - corrected file names --- deploy/all.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) mode change 100644 => 100755 deploy/all.sh diff --git a/deploy/all.sh b/deploy/all.sh old mode 100644 new mode 100755 index 2be46a28b..c6da23366 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -104,15 +104,15 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""} ######################################################################################################################## # Deploy CockroachDB -./deploy/deploy_crdb.sh +./deploy/crdb.sh # Deploy NATS -./deploy/deploy_nats.sh +./deploy/nats.sh # Deploy TFS -./deploy/deploy_tfs.sh +./deploy/tfs.sh # Show deploy summary -./show_deploy.sh +./deploy/show.sh echo "Done!" -- GitLab From 73ee7ddca7484b2008cefb8ef70b599f95babd4e Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 15:56:45 +0000 Subject: [PATCH 148/158] Deploy scripts: - moved creation of secrets from crdb.sh and nats.sh to tfs.sh for safety reasons --- deploy/crdb.sh | 38 -------------------------------------- deploy/nats.sh | 20 ++------------------ deploy/tfs.sh | 34 ++++++++++++++++++++++++++++++++++ my_deploy.sh | 19 ++++++------------- 4 files changed, 42 insertions(+), 69 deletions(-) diff --git a/deploy/crdb.sh b/deploy/crdb.sh index ecec39101..76aa07370 100755 --- a/deploy/crdb.sh +++ b/deploy/crdb.sh @@ -30,12 +30,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} # If not already set, set the database name to be used by Context. export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} -# If not already set, set the name of the secret where CockroachDB data and credentials will be stored. -export CRDB_SECRET_NAME=${CRDB_SECRET_NAME:-"crdb-data"} - -# If not already set, set the namespace where the secret containing CockroachDB data and credentials will be stored. -export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-"tfs"} - # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'. # "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while # checking/deploying CockroachDB. @@ -138,25 +132,9 @@ function crdb_deploy_single() { PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" echo - - echo "Create secret with CockroachDB data" - kubectl create secret generic ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --type='Opaque' \ - --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ - --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ - --from-literal=CRDB_GUI_PORT=${CRDB_GUI_PORT} \ - --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ - --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ - --from-literal=CRDB_PASSWORD="'"${CRDB_PASSWORD}"'" \ - --from-literal=CRDB_SSLMODE=require - - kubectl get all --all-namespaces } function crdb_undeploy_single() { - echo "Delete secret with CockroachDB data" - kubectl delete secret ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --ignore-not-found - echo - echo "CockroachDB" echo ">>> Checking if CockroachDB is deployed..." if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then @@ -307,25 +285,9 @@ function crdb_deploy_cluster() { PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" echo - - echo "Create secret with CockroachDB data" - kubectl create secret generic ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --type='Opaque' \ - --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ - --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ - --from-literal=CRDB_GUI_PORT=${CRDB_GUI_PORT} \ - --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ - --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ - --from-literal=CRDB_PASSWORD="'"${CRDB_PASSWORD}"'" \ - --from-literal=CRDB_SSLMODE=require - - kubectl get all --all-namespaces } function crdb_undeploy_cluster() { - echo "Delete secret with CockroachDB data" - kubectl delete secret ${CRDB_SECRET_NAME} --namespace ${CRDB_SECRET_NAMESPACE} --ignore-not-found - echo - echo "CockroachDB Client" echo ">>> Checking if CockroachDB Client is deployed..." if kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-client-secure &> /dev/null; then diff --git a/deploy/nats.sh b/deploy/nats.sh index 757b0984f..544028996 100755 --- a/deploy/nats.sh +++ b/deploy/nats.sh @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + ######################################################################################################################## # Read deployment settings ######################################################################################################################## @@ -20,17 +21,12 @@ # If not already set, set the namespace where NATS will be deployed. export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} -# If not already set, set the name of the secret where NATS data and credentials will be stored. -export NATS_SECRET_NAME=${NATS_SECRET_NAME:-"nats-data"} - -# If not already set, set the namespace where the secret containing NATS data and credentials will be stored. -export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-"tfs"} - # If not already set, disable flag for re-deploying NATS from scratch. # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION! # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS. export NATS_REDEPLOY=${NATS_REDEPLOY:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -106,21 +102,9 @@ function nats_deploy_single() { PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}' kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}" echo - - echo "Create secret with NATS data" - kubectl create secret generic ${NATS_SECRET_NAME} --namespace ${NATS_SECRET_NAMESPACE} --type='Opaque' \ - --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \ - --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT} \ - --from-literal=NATS_GUI_PORT=${NATS_GUI_PORT} - - kubectl get all --all-namespaces } function nats_undeploy_single() { - echo "Delete secret with NATS data" - kubectl delete secret ${NATS_SECRET_NAME} --namespace ${NATS_SECRET_NAMESPACE} --ignore-not-found - echo - echo "NATS" echo ">>> Checking if NATS is deployed..." if kubectl get --namespace ${NATS_NAMESPACE} statefulset/nats &> /dev/null; then diff --git a/deploy/tfs.sh b/deploy/tfs.sh index efa4875c6..8b172b0fc 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -42,6 +42,22 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} +# If not already set, set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"} + +# If not already set, set the database username to be used by Context. +export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"} + +# If not already set, set the database user's password to be used by Context. +export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} + +# If not already set, set the database name to be used by Context. +export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} + +# If not already set, set the namespace where NATS will be deployed. +export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"} + + ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -61,6 +77,24 @@ kubectl delete namespace $TFS_K8S_NAMESPACE kubectl create namespace $TFS_K8S_NAMESPACE printf "\n" +echo "Create secret with CockroachDB data" +CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') +kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \ + --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ + --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ + --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ + --from-literal=CRDB_PASSWORD="'"${CRDB_PASSWORD}"'" \ + --from-literal=CRDB_SSLMODE=require +printf "\n" + +echo "Create secret with NATS data" +NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}') +kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \ + --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \ + --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT} +printf "\n" + echo "Deploying components and collecting environment variables..." ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT diff --git a/my_deploy.sh b/my_deploy.sh index 41dce9bd5..8795f9e81 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. + +# ----- TeraFlowSDN ------------------------------------------------------------ + # Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} @@ -34,6 +37,7 @@ export TFS_GRAFANA_PASSWORD="admin123+" # Disable skip-build flag to rebuild the Docker images. export TFS_SKIP_BUILD="" +# ----- CockroachDB ------------------------------------------------------------ # Set the namespace where CockroackDB will be deployed. export CRDB_NAMESPACE="crdb" @@ -46,12 +50,6 @@ export CRDB_PASSWORD="tfs123" # Set the database name to be used by Context. export CRDB_DATABASE="tfs" -# Set the name of the secret where CockroachDB data and credentials will be stored. -export CRDB_SECRET_NAME="crdb-data" - -# Set the namespace where the secret containing CockroachDB data and credentials will be stored. -export CRDB_SECRET_NAMESPACE=${TFS_K8S_NAMESPACE} - # Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. # See ./deploy/all.sh or ./deploy/crdb.sh for additional details export CRDB_DEPLOY_MODE="single" @@ -62,14 +60,9 @@ export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. export CRDB_REDEPLOY="" +# ----- NATS ------------------------------------------------------------------- # Set the namespace where NATS will be deployed. export NATS_NAMESPACE="nats" -# Set the name of the secret where NATS data and credentials will be stored. -export NATS_SECRET_NAME="nats-data" - -# Set the namespace where the secret containing NATS data and credentials will be stored. -export NATS_SECRET_NAMESPACE=${TFS_K8S_NAMESPACE} - # Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY=${NATS_REDEPLOY:-""} +export NATS_REDEPLOY="" -- GitLab From 3e0618ace87d2414cd0427751648ccd1b3b80c15 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 16:05:40 +0000 Subject: [PATCH 149/158] Readme: - minor cosmetic change --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 67f6895ce..88af03272 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # TeraFlowSDN Controller [ETSI OpenSource Group for TeraFlowSDN](https://tfs.etsi.org/) + Former, [Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows Branch "master" : [](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [](https://labs.etsi.org/rep/tfs/controller/-/commits/master) -- GitLab From 65ad5b0d99394d3d5ff4fdb138e32d695dd12ced Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 16:08:06 +0000 Subject: [PATCH 150/158] Removed unneeded file --- INSTALL.md | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 INSTALL.md diff --git a/INSTALL.md b/INSTALL.md deleted file mode 100644 index 670af4873..000000000 --- a/INSTALL.md +++ /dev/null @@ -1,4 +0,0 @@ -# TeraFlow OS SDN Controller Installation Instructions -Assuming you have a running Kubernetes deployment installed following the instructions provided in [Wiki: Installing Kubernetes on your Linux machine](https://gitlab.com/teraflow-h2020/controller/-/wikis/Installing-Kubernetes-on-your-Linux-machine), the following instructions will let you deploy TeraFlow OS SDN Controller in your local Kubernetes environment. - -Then, follow the instructions in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance) to deploy your instance of TeraFlow OS. -- GitLab From 44edfa8dd66a700a1a3f24aaef3709e4c62abc58 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 16:19:44 +0000 Subject: [PATCH 151/158] Deploy scripts: - corrected secret creation in tfs.sh --- deploy/tfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 8b172b0fc..02e841fa3 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -84,7 +84,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type= --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \ --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \ --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \ - --from-literal=CRDB_PASSWORD="'"${CRDB_PASSWORD}"'" \ + --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \ --from-literal=CRDB_SSLMODE=require printf "\n" -- GitLab From 8aad7b02e77bc4bf9760984538dfcb856639cb87 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 16:50:32 +0000 Subject: [PATCH 152/158] Deploy scripts: - corrected manifest adaptation in tfs.sh --- deploy/tfs.sh | 125 ++++++++++++++++++++------------------------------ 1 file changed, 50 insertions(+), 75 deletions(-) diff --git a/deploy/tfs.sh b/deploy/tfs.sh index 02e841fa3..86043ee44 100755 --- a/deploy/tfs.sh +++ b/deploy/tfs.sh @@ -129,50 +129,48 @@ for COMPONENT in $TFS_COMPONENTS; do docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" fi - if [ -n "$TFS_REGISTRY_IMAGES" ]; then - echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." - if [ "$COMPONENT" == "pathcomp" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" - docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" - docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" - elif [ "$COMPONENT" == "dlt" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" - docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" + docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" - docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" + docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" - else - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" - docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" - fi + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" fi fi @@ -180,53 +178,30 @@ for COMPONENT in $TFS_COMPONENTS; do MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml" cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST" - if [ -n "$TFS_REGISTRY_IMAGES" ]; then - # Registry is set - if [ "$COMPONENT" == "pathcomp" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - elif [ "$COMPONENT" == "dlt" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - else - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - fi - - sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" else - # Registry is not set - if [ "$COMPONENT" == "pathcomp" ]; then - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $COMPONENT-frontend:$TFS_IMAGE_TAG#g" "$MANIFEST" - - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $COMPONENT-backend:$TFS_IMAGE_TAG#g" "$MANIFEST" - elif [ "$COMPONENT" == "dlt" ]; then - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $COMPONENT-connector:$TFS_IMAGE_TAG#g" "$MANIFEST" - - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $COMPONENT-gateway:$TFS_IMAGE_TAG#g" "$MANIFEST" - else - VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $COMPONENT:$TFS_IMAGE_TAG#g" "$MANIFEST" - fi - - sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" fi + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" + # TODO: harmonize names of the monitoring component echo " Deploying '$COMPONENT' component to Kubernetes..." -- GitLab From 0862e48b0a629c73d640c00275ec37aab937c567 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 16:52:53 +0000 Subject: [PATCH 153/158] My-Deploy example script: - added load generator --- my_deploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/my_deploy.sh b/my_deploy.sh index 8795f9e81..644904f8b 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -20,7 +20,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} # Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" +export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator" # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" -- GitLab From dd1fe0ca1d04ee47a6d0391ea583fe7be8fefa9b Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 16:53:14 +0000 Subject: [PATCH 154/158] LoadGenerator component: - restored default parameters --- .../service/LoadGeneratorServiceServicerImpl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py index 4957625bc..67158f1bf 100644 --- a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py +++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py @@ -28,7 +28,7 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): def __init__(self): LOGGER.debug('Creating Servicer...') self._parameters = Parameters( - num_requests = 1, + num_requests = 100, request_types = [ RequestType.SERVICE_L2NM, RequestType.SERVICE_L3NM, @@ -39,7 +39,7 @@ class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer): ], offered_load = 50, holding_time = 10, - do_teardown = False, + do_teardown = True, dry_mode = False, # in dry mode, no request is sent to TeraFlowSDN record_to_dlt = False, # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT dlt_domain_id = 'dlt-perf-eval', # domain used to uploaded entities, ignored when record_to_dlt = False -- GitLab From c35ebb5899ec4f98aed057d60bd667dba09e6a85 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 16:58:38 +0000 Subject: [PATCH 155/158] Manifests: - updated log level to INFO --- manifests/contextservice.yaml | 2 +- manifests/deviceservice.yaml | 2 +- manifests/serviceservice.yaml | 2 +- manifests/sliceservice.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 3bb1a01d9..805808d5d 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -38,7 +38,7 @@ spec: - name: MB_BACKEND value: "nats" - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" envFrom: - secretRef: name: crdb-data diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 5c72263eb..3580df088 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:2020"] diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 561b204cb..02c9e25db 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index ca7641de3..56c5eb1b5 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:4040"] -- GitLab From 478b88e339d5b044267d0e70d68c79baa2da8630 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 17:08:44 +0000 Subject: [PATCH 156/158] Manifests: - activated Grafana in WebUI - disabled Context debug endpoint in Ingress controller --- manifests/nginx_ingress_http.yaml | 14 +++--- manifests/webuiservice.yaml | 80 +++++++++++++++---------------- 2 files changed, 47 insertions(+), 47 deletions(-) diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index 50ff81c79..fe262d328 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -22,13 +22,13 @@ spec: name: webuiservice port: number: 3000 - - path: /context(/|$)(.*) - pathType: Prefix - backend: - service: - name: contextservice - port: - number: 8080 + #- path: /context(/|$)(.*) + # pathType: Prefix + # backend: + # service: + # name: contextservice + # port: + # number: 8080 - path: /()(restconf/.*) pathType: Prefix backend: diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index 3828ee419..d0a64871a 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -60,43 +60,43 @@ spec: limits: cpu: 700m memory: 1024Mi - #- name: grafana - # image: grafana/grafana:8.5.11 - # imagePullPolicy: IfNotPresent - # ports: - # - containerPort: 3000 - # name: http-grafana - # protocol: TCP - # env: - # - name: GF_SERVER_ROOT_URL - # value: "http://0.0.0.0:3000/grafana/" - # - name: GF_SERVER_SERVE_FROM_SUB_PATH - # value: "true" - # readinessProbe: - # failureThreshold: 3 - # httpGet: - # path: /robots.txt - # port: 3000 - # scheme: HTTP - # initialDelaySeconds: 10 - # periodSeconds: 30 - # successThreshold: 1 - # timeoutSeconds: 2 - # livenessProbe: - # failureThreshold: 3 - # initialDelaySeconds: 30 - # periodSeconds: 10 - # successThreshold: 1 - # tcpSocket: - # port: 3000 - # timeoutSeconds: 1 - # resources: - # requests: - # cpu: 250m - # memory: 750Mi - # limits: - # cpu: 700m - # memory: 1024Mi + - name: grafana + image: grafana/grafana:8.5.11 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + env: + - name: GF_SERVER_ROOT_URL + value: "http://0.0.0.0:3000/grafana/" + - name: GF_SERVER_SERVE_FROM_SUB_PATH + value: "true" + readinessProbe: + failureThreshold: 3 + httpGet: + path: /robots.txt + port: 3000 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 750Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -110,6 +110,6 @@ spec: - name: webui port: 8004 targetPort: 8004 - #- name: grafana - # port: 3000 - # targetPort: 3000 + - name: grafana + port: 3000 + targetPort: 3000 -- GitLab From 963a4a6f3d43b0c1df88e62545c954caa32a351b Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 17:14:43 +0000 Subject: [PATCH 157/158] Scripts: - adapted cockroachdb_client script to be usable in cluster and single mode --- scripts/cockroachdb_client.sh | 37 ++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/scripts/cockroachdb_client.sh b/scripts/cockroachdb_client.sh index 6ac9eea6e..edd979446 100755 --- a/scripts/cockroachdb_client.sh +++ b/scripts/cockroachdb_client.sh @@ -13,4 +13,39 @@ # See the License for the specific language governing permissions and # limitations under the License. -kubectl exec -it cockroachdb-client-secure --namespace crdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + +# If not already set, set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"} + +# If not already set, set the database username to be used by Context. +export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"} + +# If not already set, set the database user's password to be used by Context. +export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"} + +# If not already set, set the database name to be used by Context. +export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"} + +# If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'. +export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"} + + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +if [ "$CRDB_DEPLOY_MODE" == "single" ]; then + CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}') + CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@cockroachdb-0:${CRDB_SQL_PORT}/defaultdb?sslmode=require" + kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \ + ./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} +elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then + kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \ + ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public +else + echo "Unsupported value: CRDB_DEPLOY_MODE=$CRDB_DEPLOY_MODE" +fi -- GitLab From fa153b65507328a2f12b722920df38fc4970ff89 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Thu, 26 Jan 2023 18:08:18 +0000 Subject: [PATCH 158/158] CI/CD pipeline - removed unneeded stages --- .gitlab-ci.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 242f0b60d..45d4056c6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,8 +16,6 @@ stages: #- dependencies - build - - build - - test - unit_test #- deploy #- end2end_test -- GitLab