Commit a5a3da5d authored by Lluis Gifre Renom

Merge branch 'feat/context-service' into 'master'

Integrate initial release of Context service

See merge request teraflow-h2020/controller!3
parents 29231f0d d014b96b
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

from context.proto import context_pb2 as context__pb2


class ContextServiceStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.GetTopology = channel.unary_unary(
                '/context.ContextService/GetTopology',
                request_serializer=context__pb2.Empty.SerializeToString,
                response_deserializer=context__pb2.Topology.FromString,
                )


class ContextServiceServicer(object):
    """Missing associated documentation comment in .proto file."""

    def GetTopology(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_ContextServiceServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'GetTopology': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTopology,
                    request_deserializer=context__pb2.Empty.FromString,
                    response_serializer=context__pb2.Topology.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'context.ContextService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class ContextService(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def GetTopology(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetTopology',
            context__pb2.Empty.SerializeToString,
            context__pb2.Topology.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
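
The tests further down import a ContextClient wrapper that is not included in this merge excerpt. A minimal sketch of what such a wrapper has to do with the generated stub, assuming an insecure channel; the class name and shape are illustrative, only the stub API (ContextServiceStub, GetTopology, Empty, Topology) comes from the generated code above:

# Hypothetical minimal client wrapper around the generated stub; illustrative
# only, the real ContextClient is not part of this excerpt.
import grpc
from context.proto.context_pb2 import Empty
from context.proto.context_pb2_grpc import ContextServiceStub

class MinimalContextClient:
    def __init__(self, address, port):
        # Insecure channel, matching add_insecure_port() on the server side
        self.channel = grpc.insecure_channel('{}:{}'.format(address, port))
        self.stub = ContextServiceStub(self.channel)

    def GetTopology(self, request):
        # Unary-unary RPC: one Empty request in, one Topology reply out
        return self.stub.GetTopology(request)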
grpcio-health-checking
grpcio
prometheus-client
pytest
pytest-benchmark
redis
#!/bin/bash
# Make folder containing the script the root folder for its execution
cd "$(dirname "$0")"
# Read the ClusterIP and gRPC port of the contextservice Kubernetes service into a two-element array
ENDPOINT=($(kubectl --namespace teraflow-development get service contextservice -o 'jsonpath={.spec.clusterIP} {.spec.ports[?(@.name=="grpc")].port}'))
docker run -it --env TEST_TARGET_ADDRESS=${ENDPOINT[0]} --env TEST_TARGET_PORT=${ENDPOINT[1]} context_service:test
#!/bin/bash
# Make folder containing the script the root folder for its execution
cd "$(dirname "$0")"
mkdir -p data
pytest -v --log-level=DEBUG tests/test_unitary.py
import grpc
import logging
from concurrent import futures
from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
from grpc_health.v1.health_pb2 import HealthCheckResponse
from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
from context.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
from context.service.ContextServiceServicerImpl import ContextServiceServicerImpl
from context.Config import SERVICE_PORT, MAX_WORKERS, GRACE_PERIOD

BIND_ADDRESS = '0.0.0.0'
LOGGER = logging.getLogger(__name__)

class ContextService:
    def __init__(self, database, address=BIND_ADDRESS, port=SERVICE_PORT, max_workers=MAX_WORKERS,
                 grace_period=GRACE_PERIOD):
        self.database = database
        self.address = address
        self.port = port
        self.endpoint = None
        self.max_workers = max_workers
        self.grace_period = grace_period
        self.context_servicer = None
        self.health_servicer = None
        self.pool = None
        self.server = None

    def start(self):
        self.endpoint = '{}:{}'.format(self.address, self.port)
        LOGGER.debug('Starting Service (tentative endpoint: {}, max_workers: {})...'.format(
            self.endpoint, self.max_workers))

        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))

        self.context_servicer = ContextServiceServicerImpl(self.database)
        add_ContextServiceServicer_to_server(self.context_servicer, self.server)

        self.health_servicer = HealthServicer(
            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
        add_HealthServicer_to_server(self.health_servicer, self.server)

        # Bind; if port is 0 the OS picks a free one, so refresh the endpoint with the bound port
        port = self.server.add_insecure_port(self.endpoint)
        self.endpoint = '{}:{}'.format(self.address, port)
        LOGGER.info('Listening on {}...'.format(self.endpoint))
        self.server.start()
        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member

        LOGGER.debug('Service started')

    def stop(self):
        LOGGER.debug('Stopping service (grace period {} seconds)...'.format(self.grace_period))
        self.health_servicer.enter_graceful_shutdown()
        self.server.stop(self.grace_period)
        LOGGER.debug('Service stopped')
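
Note that start() registers the standard gRPC health service and flips it to SERVING once the server is listening, so liveness probes can query the server without touching the Context API. A minimal sketch of such a probe, assuming a running instance at a known address and port:

# Sketch of a health probe against the HealthServicer registered in start();
# assumes the service is already running and reachable at address:port.
import grpc
from grpc_health.v1.health_pb2 import HealthCheckRequest
from grpc_health.v1.health_pb2_grpc import HealthStub

def check_health(address, port):
    channel = grpc.insecure_channel('{}:{}'.format(address, port))
    stub = HealthStub(channel)
    # service='' queries the overall server health (OVERALL_HEALTH)
    response = stub.Check(HealthCheckRequest(service=''))
    return response.status  # HealthCheckResponse.SERVING while the server is up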
import grpc, logging
from prometheus_client import Counter, Histogram
from context.proto.context_pb2 import Topology
from context.proto.context_pb2_grpc import ContextServiceServicer

LOGGER = logging.getLogger(__name__)

GETTOPOLOGY_COUNTER_STARTED    = Counter  ('context_gettopology_counter_started',
                                           'Context:GetTopology counter of requests started'  )
GETTOPOLOGY_COUNTER_COMPLETED  = Counter  ('context_gettopology_counter_completed',
                                           'Context:GetTopology counter of requests completed')
GETTOPOLOGY_COUNTER_FAILED     = Counter  ('context_gettopology_counter_failed',
                                           'Context:GetTopology counter of requests failed'   )
GETTOPOLOGY_HISTOGRAM_DURATION = Histogram('context_gettopology_histogram_duration',
                                           'Context:GetTopology histogram of request duration')

class ContextServiceServicerImpl(ContextServiceServicer):
    def __init__(self, database):
        LOGGER.debug('Creating Servicer...')
        self.database = database
        LOGGER.debug('Servicer Created')

    @GETTOPOLOGY_HISTOGRAM_DURATION.time()
    def GetTopology(self, request, context):
        # request=Empty(), returns=Topology()
        GETTOPOLOGY_COUNTER_STARTED.inc()
        try:
            LOGGER.debug('GetTopology request: {}'.format(str(request)))
            reply = Topology(**self.database.get_topology())
            LOGGER.debug('GetTopology reply: {}'.format(str(reply)))
            GETTOPOLOGY_COUNTER_COMPLETED.inc()
            return reply
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception('GetTopology exception')
            GETTOPOLOGY_COUNTER_FAILED.inc()
            context.set_code(grpc.StatusCode.INTERNAL)
            return Topology()
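
Each GetTopology call is timed by the Histogram decorator and increments the started counter on entry, then either the completed or the failed counter. The entrypoint below exposes all of them through prometheus_client's start_http_server, so they can be scraped as plain text; a small sketch, assuming the metrics endpoint is reachable on localhost at whatever port METRICS_PORT resolves to:

# Sketch: scrape and print the GetTopology metrics exported by the service.
# Assumes the prometheus_client HTTP server started in the entrypoint below
# is reachable on 127.0.0.1 at the given port.
from urllib.request import urlopen

def dump_gettopology_metrics(metrics_port):
    body = urlopen('http://127.0.0.1:{}/metrics'.format(metrics_port)).read().decode('utf-8')
    for line in body.splitlines():
        # Counters are exported with a _total suffix; the histogram adds
        # _bucket, _sum and _count series.
        if 'context_gettopology' in line:
            print(line)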
import logging, os, signal, sys, threading
from prometheus_client import start_http_server
from common.database.Factory import get_database
from context.service.ContextService import ContextService
from context.Config import SERVICE_PORT, MAX_WORKERS, GRACE_PERIOD, LOG_LEVEL, METRICS_PORT

terminate = threading.Event()
logger = None

def signal_handler(signum, frame):  # pylint: disable=unused-argument
    global terminate, logger
    logger.warning('Terminate signal received')
    terminate.set()

def main():
    global terminate, logger

    # Environment variables arrive as strings; cast the numeric settings so
    # overrides behave like the integer defaults from context.Config
    service_port = int(os.environ.get('CONTEXTSERVICE_SERVICE_PORT_GRPC', SERVICE_PORT))
    max_workers  = int(os.environ.get('MAX_WORKERS',  MAX_WORKERS ))
    grace_period = int(os.environ.get('GRACE_PERIOD', GRACE_PERIOD))
    log_level    =     os.environ.get('LOG_LEVEL',    LOG_LEVEL   )
    metrics_port = int(os.environ.get('METRICS_PORT', METRICS_PORT))

    logging.basicConfig(level=log_level)
    logger = logging.getLogger(__name__)

    signal.signal(signal.SIGINT,  signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    logger.info('Starting...')

    # Start metrics server
    start_http_server(metrics_port)

    # Get database instance
    database = get_database()

    # Starting context service
    service = ContextService(database, port=service_port, max_workers=max_workers, grace_period=grace_period)
    service.start()

    # Wait for Ctrl+C or termination signal
    while not terminate.wait(0.1): pass

    logger.info('Terminating...')
    service.stop()

    logger.info('Bye')
    return 0

if __name__ == '__main__':
    sys.exit(main())
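
The shutdown path can be exercised without a terminal: any SIGTERM delivered to the process runs signal_handler, which sets the terminate Event; the wait loop in main() then exits and service.stop() drains in-flight RPCs for up to grace_period seconds. A purely illustrative sketch:

# Illustrative only: deliver SIGTERM to the current process, as an external
# `kill <pid>` (or a Kubernetes pod termination) would.
import os, signal

os.kill(os.getpid(), signal.SIGTERM)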
import logging, os, pytest, sys
sys.path.append(__file__.split('src')[0] + 'src')  # make <repo>/src importable when run from the repo root
from context.client.ContextClient import ContextClient
from context.proto.context_pb2 import Empty
from .tools.ValidateTopology import validate_topology_dict

LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)

@pytest.fixture(scope='session')
def remote_context_client():
    address = os.environ.get('TEST_TARGET_ADDRESS')
    if address is None: raise Exception('EnvironmentVariable(TEST_TARGET_ADDRESS) not specified')
    port = os.environ.get('TEST_TARGET_PORT')
    if port is None: raise Exception('EnvironmentVariable(TEST_TARGET_PORT) not specified')
    return ContextClient(address=address, port=port)

def test_remote_get_topology(remote_context_client):
    response = remote_context_client.GetTopology(Empty())
    validate_topology_dict(response)
import logging, pytest, sys
sys.path.append(__file__.split('src')[0] + 'src')  # make <repo>/src importable when run from the repo root
from context.client.ContextClient import ContextClient
from context.database.Factory import get_database, DatabaseEngineEnum
from context.proto.context_pb2 import Empty
from context.service.ContextService import ContextService
from context.Config import SERVICE_PORT, MAX_WORKERS, GRACE_PERIOD
from context.tests.tools.ValidateTopology import validate_topology_dict

LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)

@pytest.fixture(scope='session')
def local_context_service():
    database = get_database(engine=DatabaseEngineEnum.INMEMORY, filepath='data/topo_nsfnet.json')
    _service = ContextService(database, port=SERVICE_PORT, max_workers=MAX_WORKERS, grace_period=GRACE_PERIOD)
    _service.start()
    yield _service
    _service.stop()

@pytest.fixture(scope='session')
def local_context_client(local_context_service):
    return ContextClient(address='127.0.0.1', port=SERVICE_PORT)

def test_local_get_topology(local_context_client):
    response = local_context_client.GetTopology(Empty())
    validate_topology_dict(response)
def validate_topology_dict(topology):
    # Validate only the top-level shape of the topology dictionary
    assert isinstance(topology, dict)
    assert len(topology.keys()) > 0
    assert 'topoId' in topology
    assert 'device' in topology
    assert 'link' in topology
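
The validator checks only that the three top-level keys are present, so a minimal passing input can be read straight off the assertions; the empty values below are placeholders:

# Minimal dict accepted by validate_topology_dict(); only the top-level keys
# matter here, the empty values are placeholders.
example_topology = {'topoId': {}, 'device': [], 'link': []}
validate_topology_dict(example_topology)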