diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8f82b1ff4f42ec2cf0902c47c02eec748647246
--- /dev/null
+++ b/manifests/deviceservice.yaml
@@ -0,0 +1,53 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: deviceservice
+spec:
+  selector:
+    matchLabels:
+      app: deviceservice
+  template:
+    metadata:
+      labels:
+        app: deviceservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+      - name: server
+        image: device:dockerfile
+        imagePullPolicy: Never
+        ports:
+        - containerPort: 7070
+        env:
+        - name: DB_ENGINE
+          value: "redis"
+        - name: REDISDB_DATABASE_ID
+          value: "0"
+        - name: LOG_LEVEL
+          value: "DEBUG"
+        readinessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:7070"]
+        livenessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:7070"]
+        resources:
+          requests:
+            cpu: 250m
+            memory: 512Mi
+          limits:
+            cpu: 700m
+            memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: deviceservice
+spec:
+  type: ClusterIP
+  selector:
+    app: deviceservice
+  ports:
+  - name: grpc
+    port: 7070
+    targetPort: 7070
diff --git a/src/common/database/redis/context_api/Context.py b/src/common/database/redis/context_api/Context.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9f2486c0f6fdcd08873413c10c3cd28f2c4e590
--- /dev/null
+++ b/src/common/database/redis/context_api/Context.py
@@ -0,0 +1,37 @@
+from redis.client import Redis
+from .tools._Entity import _Entity
+from .tools.EntityAttributes import EntityAttributes
+from .tools.EntityCollection import EntityCollection
+from .tools.Mutex import Mutex
+from .Keys import KEY_CONTEXT, KEY_TOPOLOGIES
+from .Topology import Topology
+
+VALIDATORS = {}
+
+class Context(_Entity):
+    def __init__(self, context_uuid : str, redis_client: Redis):
+        self.__redis_client = redis_client
+        self.__mutex = Mutex(self.__redis_client)
+        self.__acquired = False
+        self.__owner_key = None
+        self.__entity_key = KEY_CONTEXT.format(context_uuid=context_uuid)
+        super().__init__(context_uuid, self)
+        self.context_uuid = self.get_uuid()
+        self.attributes = EntityAttributes(self, KEY_CONTEXT, validators=VALIDATORS)
+        self.topologies = EntityCollection(self, KEY_TOPOLOGIES)
+
+    def get_parent(self): return(self)
+    def get_context(self): return(self)
+    def get_redis_client(self): return(self.__redis_client)
+
+    def __enter__(self):
+        self.__acquired,self.__owner_key = self.__mutex.acquire(self.__entity_key, owner_key=self.__owner_key)
+        if not self.__acquired: raise Exception('Unable to acquire {}'.format(self.__entity_key))
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.__acquired: return
+        self.__mutex.release(self.__entity_key, self.__owner_key)
+
+    def topology(self, topology_uuid : str) -> Topology:
+        return Topology(topology_uuid, self)
diff --git a/src/common/database/redis/context_api/Device.py b/src/common/database/redis/context_api/Device.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a7a2097fbe8a4b52a3fcd6f3f8a2a94710e6701
--- /dev/null
+++ b/src/common/database/redis/context_api/Device.py
@@ -0,0 +1,63 @@
+from typing import Dict
+from .tools._Entity import _Entity
+from .tools.EntityAttributes import EntityAttributes
+from .tools.EntityCollection import EntityCollection
+from .Keys import KEY_DEVICE, KEY_DEVICE_ENDPOINTS
+from .Endpoint import Endpoint
+from .OperationalStatus import OperationalStatus, to_operationalstatus_enum
+
+VALIDATORS = {
+    'device_type': lambda v: v is None or isinstance(v, str),
+    'device_config': lambda v: v is None or isinstance(v, str),
+    'device_operational_status': lambda v: v is None or isinstance(v, OperationalStatus),
+}
+
+TRANSCODERS = {
+    'device_operational_status': {
+        OperationalStatus: lambda v: v.value,
+        str              : lambda v: to_operationalstatus_enum(v),
+    }
+}
+
+class Device(_Entity):
+    def __init__(self, device_uuid : str, parent : 'Topology'): # type: ignore
+        super().__init__(device_uuid, parent=parent)
+        self.device_uuid = self.get_uuid()
+        self.topology_uuid = self._parent.get_uuid()
+        self.context_uuid = self._parent._parent.get_uuid()
+        self.attributes = EntityAttributes(self, KEY_DEVICE, VALIDATORS, transcoders=TRANSCODERS)
+        self.endpoints = EntityCollection(self, KEY_DEVICE_ENDPOINTS)
+
+    def endpoint(self, endpoint_uuid : str) -> Endpoint:
+        return Endpoint(endpoint_uuid, self)
+
+    def create(self, type : str, config : str, operational_status : OperationalStatus) -> 'Device':
+        self.update(update_attributes={
+            'device_type': type,
+            'device_config': config,
+            'device_operational_status': operational_status,
+        })
+        self._parent.devices.add(self.get_uuid())
+        return self
+
+    def update(self, update_attributes={}, remove_attributes=[]) -> 'Device':
+        self.attributes.update(update_attributes=update_attributes, remove_attributes=remove_attributes)
+        return self
+
+    def delete(self) -> None:
+        remove_attributes = ['device_type', 'device_config', 'device_operational_status']
+        self.update(remove_attributes=remove_attributes)
+        self._parent.devices.delete(self.get_uuid())
+
+    def dump(self) -> Dict:
+        attributes = self.attributes.get()
+        dev_op_status = attributes.get('device_operational_status', None)
+        if isinstance(dev_op_status, OperationalStatus): dev_op_status = dev_op_status.value
+        endpoints = [self.endpoint(endpoint_uuid).dump() for endpoint_uuid in self.endpoints.get()]
+        return {
+            'device_id': {'device_id': {'uuid': self.device_uuid}},
+            'device_type': attributes.get('device_type', None),
+            'device_config': {'device_config': attributes.get('device_config', None)},
+            'devOperationalStatus': dev_op_status,
+            'endpointList': endpoints
+        }
diff --git a/src/common/database/redis/context_api/Endpoint.py b/src/common/database/redis/context_api/Endpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..66a954aec38ff6664b84841c73008fcd3941f88b
--- /dev/null
+++ b/src/common/database/redis/context_api/Endpoint.py
@@ -0,0 +1,47 @@
+from typing import Dict
+from .tools._Entity import _Entity
+from .tools.EntityAttributes import EntityAttributes
+from .Keys import KEY_ENDPOINT
+
+VALIDATORS = {
+    'port_type': lambda v: v is None or isinstance(v, str),
+}
+
+class Endpoint(_Entity):
+    def __init__(self, endpoint_uuid : str, parent : 'Device'): # type: ignore
+        super().__init__(endpoint_uuid, parent=parent)
+        self.endpoint_uuid = self.get_uuid()
+        self.device_uuid = self._parent.get_uuid()
+        self.topology_uuid = self._parent._parent.get_uuid()
+        self.context_uuid = self._parent._parent._parent.get_uuid()
+        self.attributes = EntityAttributes(self, KEY_ENDPOINT, VALIDATORS)
+
+    def create(self, type : str) -> 'Endpoint':
+        self.update(update_attributes={
+            'port_type': type
+        })
+        self._parent.endpoints.add(self.get_uuid())
+        return self
+
+    def update(self, update_attributes={}, remove_attributes=[]) -> 'Endpoint':
+        self.attributes.update(update_attributes=update_attributes, remove_attributes=remove_attributes)
+        return self
+
+    def delete(self) -> None:
+        remove_attributes = ['port_type']
+        self.update(remove_attributes=remove_attributes)
+        self._parent.endpoints.delete(self.get_uuid())
+
+    def dump_uuid(self) -> Dict:
+        return {
+            'topoId': {'uuid': self.topology_uuid},
+            'dev_id': {'uuid': self.device_uuid},
+            'port_id': {'uuid': self.endpoint_uuid},
+        }
+
+    def dump(self) -> Dict:
+        attributes = self.attributes.get()
+        return {
+            'port_id': self.dump_uuid(),
+            'port_type': attributes.get('port_type', None),
+        }
diff --git a/src/common/database/redis/context_api/Keys.py b/src/common/database/redis/context_api/Keys.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c107ec1509949ef8b2f91a3e7e4505846fdf62a
--- /dev/null
+++ b/src/common/database/redis/context_api/Keys.py
@@ -0,0 +1,11 @@
+KEY_CONTEXT          =                'context[{context_uuid}]'
+KEY_TOPOLOGIES       = KEY_CONTEXT  + '/topologies{container_name}'
+KEY_TOPOLOGY         = KEY_CONTEXT  + '/topology[{topology_uuid}]' 
+KEY_DEVICES          = KEY_TOPOLOGY + '/devices{container_name}'
+KEY_LINKS            = KEY_TOPOLOGY + '/links{container_name}'
+KEY_DEVICE           = KEY_TOPOLOGY + '/device[{device_uuid}]' 
+KEY_DEVICE_ENDPOINTS = KEY_DEVICE   + '/endpoints{container_name}'
+KEY_ENDPOINT         = KEY_DEVICE   + '/endpoint[{endpoint_uuid}]' 
+KEY_LINK             = KEY_TOPOLOGY + '/link[{link_uuid}]' 
+KEY_LINK_ENDPOINTS   = KEY_LINK     + '/endpoints{container_name}'
+KEY_LINK_ENDPOINT    = KEY_LINK     + '/endpoint[{link_endpoint_uuid}]'
diff --git a/src/common/database/redis/context_api/Link.py b/src/common/database/redis/context_api/Link.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc788a5a98440a852bac7f8233d129f2b87c7684
--- /dev/null
+++ b/src/common/database/redis/context_api/Link.py
@@ -0,0 +1,30 @@
+from typing import Dict
+from .tools._Entity import _Entity
+from .tools.EntityCollection import EntityCollection
+from .Keys import KEY_LINK_ENDPOINTS
+from .LinkEndpoint import LinkEndpoint
+
+class Link(_Entity):
+    def __init__(self, link_uuid : str, parent : 'Topology'): # type: ignore
+        super().__init__(link_uuid, parent=parent)
+        self.link_uuid = self.get_uuid()
+        self.topology_uuid = self._parent.get_uuid()
+        self.context_uuid = self._parent._parent.get_uuid()
+        self.endpoints = EntityCollection(self, KEY_LINK_ENDPOINTS)
+
+    def endpoint(self, link_endpoint_uuid : str) -> LinkEndpoint:
+        return LinkEndpoint(link_endpoint_uuid, self)
+
+    def create(self) -> 'Link':
+        self._parent.links.add(self.get_uuid())
+        return self
+
+    def delete(self) -> None:
+        self._parent.links.delete(self.get_uuid())
+
+    def dump(self) -> Dict:
+        endpoints = [self.endpoint(link_endpoint_uuid).dump() for link_endpoint_uuid in self.endpoints.get()]
+        return {
+            'link_id': {'link_id': {'uuid': self.link_uuid}},
+            'endpointList': endpoints
+        }
diff --git a/src/common/database/redis/context_api/LinkEndpoint.py b/src/common/database/redis/context_api/LinkEndpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff0033929819fa75672488307f4891ebf5d4fb65
--- /dev/null
+++ b/src/common/database/redis/context_api/LinkEndpoint.py
@@ -0,0 +1,44 @@
+from typing import Dict
+from .tools._Entity import _Entity
+from .tools.EntityAttributes import EntityAttributes
+from .Endpoint import Endpoint
+from .Keys import KEY_LINK_ENDPOINT
+
+VALIDATORS = {
+    'device_uuid': lambda v: v is None or isinstance(v, str),
+    'endpoint_uuid': lambda v: v is None or isinstance(v, str),
+}
+
+class LinkEndpoint(_Entity):
+    def __init__(self, link_endpoint_uuid : str, parent : 'Link'): # type: ignore
+        super().__init__(link_endpoint_uuid, parent=parent)
+        self.link_endpoint_uuid = self.get_uuid()
+        self.link_uuid = self._parent.get_uuid()
+        self.topology_uuid = self._parent._parent.get_uuid()
+        self.context_uuid = self._parent._parent._parent.get_uuid()
+        self.attributes = EntityAttributes(self, KEY_LINK_ENDPOINT, VALIDATORS)
+
+    def create(self, endpoint : Endpoint) -> 'LinkEndpoint':
+        self.update(update_attributes={
+            'device_uuid': endpoint._parent.get_uuid(),
+            'endpoint_uuid': endpoint.get_uuid(),
+        })
+        self._parent.endpoints.add(self.get_uuid())
+        return self
+
+    def update(self, update_attributes={}, remove_attributes=[]) -> 'LinkEndpoint':
+        self.attributes.update(update_attributes=update_attributes, remove_attributes=remove_attributes)
+        return self
+
+    def delete(self) -> None:
+        remove_attributes = ['device_uuid', 'endpoint_uuid']
+        self.update(remove_attributes=remove_attributes)
+        self._parent.endpoints.delete(self.get_uuid())
+
+    def dump(self) -> Dict:
+        attributes = self.attributes.get()
+        return {
+            'topoId': {'uuid': self.topology_uuid},
+            'dev_id': {'uuid': attributes.get('device_uuid', None)},
+            'port_id': {'uuid': attributes.get('endpoint_uuid', None)},
+        }
diff --git a/src/common/database/redis/context_api/OperationalStatus.py b/src/common/database/redis/context_api/OperationalStatus.py
new file mode 100644
index 0000000000000000000000000000000000000000..268c7f35961cdd1fc47b9a7bbbcdb7b51d2308c0
--- /dev/null
+++ b/src/common/database/redis/context_api/OperationalStatus.py
@@ -0,0 +1,18 @@
+from enum import Enum
+
+class OperationalStatus(Enum):
+    ENABLED = 1
+    DISABLED = 0
+
+TO_ENUM = {
+    1: OperationalStatus.ENABLED,
+    0: OperationalStatus.DISABLED,
+    '1': OperationalStatus.ENABLED,
+    '0': OperationalStatus.DISABLED,
+    'enabled': OperationalStatus.ENABLED,
+    'disabled': OperationalStatus.DISABLED,
+}
+
+def to_operationalstatus_enum(int_or_str):
+    if isinstance(int_or_str, str): int_or_str = int_or_str.lower()
+    return TO_ENUM.get(int_or_str)
diff --git a/src/common/database/redis/context_api/Topology.py b/src/common/database/redis/context_api/Topology.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f0f327d55c3d2090df337dfcbb8fd74336f805e
--- /dev/null
+++ b/src/common/database/redis/context_api/Topology.py
@@ -0,0 +1,43 @@
+from typing import Dict
+from .tools._Entity import _Entity
+from .tools.EntityAttributes import EntityAttributes
+from .tools.EntityCollection import EntityCollection
+from .Keys import KEY_TOPOLOGY, KEY_DEVICES, KEY_LINKS
+from .Device import Device
+from .Link import Link
+
+VALIDATORS = {}
+
+class Topology(_Entity):
+    def __init__(self, topology_uuid : str, parent : 'Context'): # type: ignore
+        super().__init__(topology_uuid, parent=parent)
+        self.topology_uuid = self.get_uuid()
+        self.context_uuid = self._parent.get_uuid()
+        self.attributes = EntityAttributes(self, KEY_TOPOLOGY, validators=VALIDATORS)
+        self.devices = EntityCollection(self, KEY_DEVICES)
+        self.links = EntityCollection(self, KEY_LINKS)
+
+    def device(self, device_uuid : str) -> Device:
+        return Device(device_uuid, self)
+
+    def link(self, link_uuid : str) -> Link:
+        return Link(link_uuid, self)
+
+    def create(self) -> 'Topology':
+        self._parent.topologies.add(self.get_uuid())
+        return self
+
+    def delete(self):
+        self._parent.topologies.delete(self.get_uuid())
+
+    def dump(self) -> Dict:
+        devices = [self.device(device_uuid).dump() for device_uuid in self.devices.get()]
+        links   = [self.link  (link_uuid  ).dump() for link_uuid   in self.links.get()]
+        return {
+            'topoId': {
+                'contextId': {'contextUuid': {'uuid': self.context_uuid}},
+                'topoId': {'uuid': self.topology_uuid},
+            },
+            'device': devices,
+            'link': links,
+        }
diff --git a/src/common/database/redis/context_api/__init__.py b/src/common/database/redis/context_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/common/database/redis/context_api/__tests__.py b/src/common/database/redis/context_api/__tests__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cebf96b5c38d050dc936974fa05524b38bed25c
--- /dev/null
+++ b/src/common/database/redis/context_api/__tests__.py
@@ -0,0 +1,105 @@
+import json, logging, sys, time
+from redis.client import Redis
+from .tools.RedisTools import dump_keys
+from .Context import Context
+from .OperationalStatus import OperationalStatus
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+def main():
+    redis_client = Redis('127.0.0.1', 31926)
+
+    LOGGER.info('Cleaning up...')
+    LOGGER.info('  keys before = {}'.format(str(redis_client.keys())))
+    keys = redis_client.keys()
+    for key in keys:
+        key_name = key.decode('UTF-8')
+        if(key_name == 'topology'): continue
+        redis_client.delete(key_name)
+    LOGGER.info('  keys after  = {}'.format(str(redis_client.keys())))
+
+    with Context('ctx-test', redis_client) as context:
+        topology = context.topology('base-topo').create()
+
+        device_1 = topology.device('dev1').create(type='ROADM', config='<config/>', operational_status=OperationalStatus.ENABLED)
+        endpoint_dev1_to_dev2 = device_1.endpoint('to-dev2').create(type='WDM')
+        endpoint_dev1_to_dev3 = device_1.endpoint('to-dev3').create(type='WDM')
+        endpoint_dev1_to_dev4 = device_1.endpoint('to-dev4').create(type='WDM')
+
+        device_2 = topology.device('dev2').create(type='ROADM', config='<config/>', operational_status=OperationalStatus.ENABLED)
+        endpoint_dev2_to_dev1 = device_2.endpoint('to-dev1').create(type='WDM')
+        endpoint_dev2_to_dev3 = device_2.endpoint('to-dev3').create(type='WDM')
+        endpoint_dev2_to_dev4 = device_2.endpoint('to-dev4').create(type='WDM')
+
+        device_3 = topology.device('dev3').create(type='ROADM', config='<config/>', operational_status=OperationalStatus.ENABLED)
+        endpoint_dev3_to_dev1 = device_3.endpoint('to-dev1').create(type='WDM')
+        endpoint_dev3_to_dev2 = device_3.endpoint('to-dev2').create(type='WDM')
+        endpoint_dev3_to_dev4 = device_3.endpoint('to-dev4').create(type='WDM')
+
+        device_4 = topology.device('dev4').create(type='ROADM', config='<config/>', operational_status=OperationalStatus.ENABLED)
+        endpoint_dev4_to_dev1 = device_4.endpoint('to-dev1').create(type='WDM')
+        endpoint_dev4_to_dev2 = device_4.endpoint('to-dev2').create(type='WDM')
+        endpoint_dev4_to_dev3 = device_4.endpoint('to-dev3').create(type='WDM')
+
+        link_dev1_to_dev2 = topology.link('dev1/to-dev2 ==> dev2/to-dev1').create()
+        link_dev1_to_dev2.endpoint('dev1/to-dev2').create(endpoint_dev1_to_dev2)
+        link_dev1_to_dev2.endpoint('dev2/to-dev1').create(endpoint_dev2_to_dev1)
+
+        link_dev1_to_dev3 = topology.link('dev1/to-dev3 ==> dev3/to-dev1').create()
+        link_dev1_to_dev3.endpoint('dev1/to-dev3').create(endpoint_dev1_to_dev3)
+        link_dev1_to_dev3.endpoint('dev3/to-dev1').create(endpoint_dev3_to_dev1)
+
+        link_dev1_to_dev4 = topology.link('dev1/to-dev4 ==> dev4/to-dev1').create()
+        link_dev1_to_dev4.endpoint('dev1/to-dev4').create(endpoint_dev1_to_dev4)
+        link_dev1_to_dev4.endpoint('dev4/to-dev1').create(endpoint_dev4_to_dev1)
+
+        link_dev2_to_dev1 = topology.link('dev2/to-dev1 ==> dev1/to-dev2').create()
+        link_dev2_to_dev1.endpoint('dev2/to-dev1').create(endpoint_dev2_to_dev1)
+        link_dev2_to_dev1.endpoint('dev1/to-dev2').create(endpoint_dev1_to_dev2)
+
+        link_dev2_to_dev3 = topology.link('dev2/to-dev3 ==> dev3/to-dev2').create()
+        link_dev2_to_dev3.endpoint('dev2/to-dev3').create(endpoint_dev2_to_dev3)
+        link_dev2_to_dev3.endpoint('dev3/to-dev2').create(endpoint_dev3_to_dev2)
+
+        link_dev2_to_dev4 = topology.link('dev2/to-dev4 ==> dev4/to-dev2').create()
+        link_dev2_to_dev4.endpoint('dev2/to-dev4').create(endpoint_dev2_to_dev4)
+        link_dev2_to_dev4.endpoint('dev4/to-dev2').create(endpoint_dev4_to_dev2)
+
+        link_dev3_to_dev1 = topology.link('dev3/to-dev1 ==> dev1/to-dev3').create()
+        link_dev3_to_dev1.endpoint('dev3/to-dev1').create(endpoint_dev3_to_dev1)
+        link_dev3_to_dev1.endpoint('dev1/to-dev3').create(endpoint_dev1_to_dev3)
+
+        link_dev3_to_dev2 = topology.link('dev3/to-dev2 ==> dev2/to-dev3').create()
+        link_dev3_to_dev2.endpoint('dev3/to-dev2').create(endpoint_dev3_to_dev2)
+        link_dev3_to_dev2.endpoint('dev2/to-dev3').create(endpoint_dev2_to_dev3)
+
+        link_dev3_to_dev4 = topology.link('dev3/to-dev4 ==> dev4/to-dev3').create()
+        link_dev3_to_dev4.endpoint('dev3/to-dev4').create(endpoint_dev3_to_dev4)
+        link_dev3_to_dev4.endpoint('dev4/to-dev3').create(endpoint_dev4_to_dev3)
+
+        link_dev4_to_dev1 = topology.link('dev4/to-dev1 ==> dev1/to-dev4').create()
+        link_dev4_to_dev1.endpoint('dev4/to-dev1').create(endpoint_dev4_to_dev1)
+        link_dev4_to_dev1.endpoint('dev1/to-dev4').create(endpoint_dev1_to_dev4)
+
+        link_dev4_to_dev2 = topology.link('dev4/to-dev2 ==> dev2/to-dev4').create()
+        link_dev4_to_dev2.endpoint('dev4/to-dev2').create(endpoint_dev4_to_dev2)
+        link_dev4_to_dev2.endpoint('dev2/to-dev4').create(endpoint_dev2_to_dev4)
+
+        link_dev4_to_dev3 = topology.link('dev4/to-dev3 ==> dev3/to-dev4').create()
+        link_dev4_to_dev3.endpoint('dev4/to-dev3').create(endpoint_dev4_to_dev3)
+        link_dev4_to_dev3.endpoint('dev3/to-dev4').create(endpoint_dev3_to_dev4)
+
+        dump_keys(redis_client, LOGGER)
+
+    with Context('ctx-test', redis_client) as context:
+        t0 = time.time()
+        json_topology = context.topology('base-topo').dump()
+        t1 = time.time()
+        LOGGER.info(json.dumps(json_topology))
+        LOGGER.info('Dump from Redis elapsed: {}'.format(1000.0 * (t1-t0)))
+
+    return(0)
+
+if __name__=='__main__':
+    sys.exit(main())
diff --git a/src/common/database/redis/context_api/structure.txt b/src/common/database/redis/context_api/structure.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b03250ff1b03e8fd4fbc31306c9430ac5c7c542e
--- /dev/null
+++ b/src/common/database/redis/context_api/structure.txt
@@ -0,0 +1,107 @@
+############################
+# Redis internal structure #
+############################
+
+Note (1): for containers like topologies, devices, links, etc. two containers are defined:
+list    List is a sorted list containing the uuid's of th elements belonging to the parent element. It is used to
+        define the order of the elements and enable to iterate them deterministically.
+
+set     Set is an unordered set containing the uuid's of the elements belonging to the parent element. It is used to
+        check existence of elements within the parent in O(1).
+
+
+Context structure
+-----------------
+context[<context_uuid>]/lock
+    String containing the mutex owner key of that user/process/thread that is currently managing the context.
+    Example:
+        context[ctx-test]/lock
+            dc56647a-9539-446e-9a61-cbc67de328e4
+
+context[<context_uuid>]/topologies_<container>
+    Containers (see Note 1) with the topology_uuid's belonging to the context.
+    Examples:
+        context[ctx-test]/topologies_list
+            ['base-topo', 'other-topo']
+        context[ctx-test]/topologies_set
+            {'base-topo', 'other-topo'}
+
+
+Topology structure:
+-------------------
+context[<context_uuid>]/topology[<topology_uuid>]
+    Hash set containing the attributes for the topology.
+    NOTE: Currently not used.
+    Example: <none>
+
+context[<context_uuid>]/topology[<topology_uuid>]/devices_<container>
+    Containers (see Note 1) with the device_uuid's belonging to the topology.
+    Examples:
+        context[ctx-test]/topology[base-topo]/device_list
+            ['dev1', 'dev2', 'dev3', 'dev4']
+        context[ctx-test]/topology[base-topo]/device_set
+            {'dev2', 'dev3', 'dev4', 'dev1'}
+
+context[<context_uuid>]/topology[<topology_uuid>]/links_<container>
+    Containers (see Note 1) with the link_uuid's belonging to the topology.
+    Examples:
+        context[ctx-test]/topology[base-topo]/link_list
+            ['dev1/to-dev2 ==> dev2/to-dev1', 'dev1/to-dev3 ==> dev3/to-dev1', 'dev2/to-dev1 ==> dev1/to-dev2', ...]
+        context[ctx-test]/topology[base-topo]/link_set
+            {'dev2/to-dev1 ==> dev1/to-dev2', 'dev1/to-dev2 ==> dev2/to-dev1', 'dev1/to-dev3 ==> dev3/to-dev1', ...}
+
+
+Device structure:
+-----------------
+context[<context_uuid>]/topology[<topology_uuid>]/device[<device_uuid>]
+    Hash set containing the attributes for the device.
+    Defined attributes are:
+        device_type              : string
+        device_config            : string
+        device_operational_status: string "0"/"1"
+    Example: {'device_type': 'ROADM', 'device_config': '<config/>', 'device_operational_status': '1'}
+
+context[<context_uuid>]/topology[<topology_uuid>]/device[<device_uuid>]/endpoints_<container>
+    Containers (see Note 1) with the endpoints_uuid's belonging to the device.
+    Examples:
+        context[ctx-test]/topology[base-topo]/device[dev1]/endpoints_list
+            ['to-dev2', 'to-dev3', 'to-dev4']
+        context[ctx-test]/topology[base-topo]/device[dev1]/endpoints_set
+            {'to-dev3', 'to-dev2', 'to-dev4'}
+
+
+Device Endpoint structure:
+--------------------------
+context[<context_uuid>]/topology[<topology_uuid>]/device[<device_uuid>]/endpoint[<endpoint_uuid>]
+    Hash set containing the attributes for the endpoint.
+    Defined attributes are:
+        port_type: string
+    Example: {'port_type': 'WDM'}
+
+
+Link structure:
+---------------
+context[<context_uuid>]/topology[<topology_uuid>]/link[<link_uuid>]
+    Hash set containing the attributes for the link.
+    NOTE: Currently not used.
+    Example: <none>
+
+context[<context_uuid>]/topology[<topology_uuid>]/link[<link_uuid>]/endpoints_<container>
+    Containers (see Note 1) with the link_endpoint_uuid's belonging to the link.
+    Examples:
+        context[ctx-test]/topology[base-topo]/link[dev2/to-dev1 ==> dev1/to-dev2]/endpoints_list
+            ['dev2/to-dev1', 'dev1/to-dev2']
+        context[ctx-test]/topology[base-topo]/link[dev2/to-dev1 ==> dev1/to-dev2]/endpoints_set
+            {'dev2/to-dev1', 'dev1/to-dev2'}
+
+
+Link Endpoint structure:
+------------------------
+context[<context_uuid>]/topology[<topology_uuid>]/link[<link_uuid>]/endpoint[<link_endpoint_uuid>]
+    Hash set containing the attributes for the link_endpoint.
+    Defined attributes are:
+        device_uuid: string
+        endpoint_uuid: string
+    Example:
+        context[ctx-test]/topology[base-topo]/link[dev1/to-dev2 ==> dev2/to-dev1]/endpointdev1/to-dev2
+            {'device_uuid': 'dev1', 'endpoint_uuid': 'to-dev2'}
diff --git a/src/common/database/redis/context_api/tools/EntityAttributes.py b/src/common/database/redis/context_api/tools/EntityAttributes.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8c2e691aed39e03c8668e55f3b723b1727d9e4b
--- /dev/null
+++ b/src/common/database/redis/context_api/tools/EntityAttributes.py
@@ -0,0 +1,66 @@
+import copy
+from redis.client import Redis
+
+class EntityAttributes:
+    def __init__(self, parent, entity_key_attributes, validators, transcoders={}):
+        self.__parent = parent
+        self.__redis_client : Redis = self.__parent.get_context().get_redis_client()
+        self.__entity_key_attributes = entity_key_attributes.format(**self.__parent.__dict__)
+        self.__validators = validators
+        self.__transcoders = transcoders
+
+    def validate(self, update_attributes, remove_attributes, attribute_name):
+        remove_attributes.discard(attribute_name)
+        value = update_attributes.pop(attribute_name, None)
+        validator = self.__validators.get(attribute_name)
+        if not validator(value): raise AttributeError('{} is invalid'.format(attribute_name))
+
+    def transcode(self, attribute_name, attribute_value):
+        transcoder_set = self.__transcoders.get(attribute_name, {})
+        transcoder = transcoder_set.get(type(attribute_value))
+        return attribute_value if transcoder is None else transcoder(attribute_value)
+
+    def get(self, attributes=[]):
+        if len(attributes) == 0: # retrieve all
+            keys_values = self.__redis_client.hgetall(self.__entity_key_attributes).items()
+        else: # retrieve selected
+            names = list(attributes)
+            keys_values = zip(names, self.__redis_client.hmget(self.__entity_key_attributes, attributes))
+
+        attributes = {}
+        for key,value in keys_values:
+            str_key = key.decode('UTF-8') if isinstance(key, bytes) else key
+            str_value = value.decode('UTF-8') if isinstance(value, bytes) else value
+            attributes[str_key] = self.transcode(str_key, str_value)
+        return attributes
+
+    def update(self, update_attributes={}, remove_attributes=[]):
+        remove_attributes = set(remove_attributes)
+        copy_update_attributes = copy.deepcopy(update_attributes)
+        copy_remove_attributes = copy.deepcopy(remove_attributes)
+
+        for attribute_name in self.__validators.keys():
+            self.validate(copy_update_attributes, copy_remove_attributes, attribute_name)
+            attribute_value = update_attributes.get(attribute_name)
+            if attribute_value is None: continue
+            update_attributes[attribute_name] = self.transcode(attribute_name, attribute_value)
+
+        if len(copy_update_attributes) > 0:
+            raise AttributeError('Unexpected update_attributes: {}'.format(str(copy_update_attributes)))
+
+        if len(copy_remove_attributes) > 0:
+            raise AttributeError('Unexpected remove_attributes: {}'.format(str(copy_remove_attributes)))
+
+        if len(remove_attributes) > 0:
+            self.__redis_client.hdel(self.__entity_key_attributes, remove_attributes)
+
+        if len(update_attributes) > 0:
+            self.__redis_client.hmset(self.__entity_key_attributes, update_attributes)
+
+        return self
+
+    def delete(self, attributes=[]):
+        if len(attributes) == 0: # delete all
+            self.__redis_client.delete(self.__entity_key_attributes)
+        else: # delete selected
+            self.__redis_client.hdel(self.__entity_key_attributes, attributes)
diff --git a/src/common/database/redis/context_api/tools/EntityCollection.py b/src/common/database/redis/context_api/tools/EntityCollection.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8f5bd373a810b761d1ebfc36dbc2f0a0bed372a
--- /dev/null
+++ b/src/common/database/redis/context_api/tools/EntityCollection.py
@@ -0,0 +1,24 @@
+from redis.client import Redis
+
+class EntityCollection:
+    def __init__(self, parent, entity_key):
+        self.__parent = parent
+        self.__redis_client : Redis = self.__parent.get_context().get_redis_client()
+        self.__entity_key_list = entity_key.format(container_name='_list', **self.__parent.__dict__)
+        self.__entity_key_set = entity_key.format(container_name='_set', **self.__parent.__dict__)
+
+    def add(self, entity_uuid):
+        if self.__redis_client.sismember(self.__entity_key_set, entity_uuid) == 1: return
+        self.__redis_client.sadd(self.__entity_key_set, entity_uuid)
+        self.__redis_client.rpush(self.__entity_key_list, entity_uuid)
+
+    def get(self):
+        return list(map(lambda m: m.decode('UTF-8'), self.__redis_client.lrange(self.__entity_key_list, 0, -1)))
+
+    def contains(self, entity_uuid):
+        return self.__redis_client.sismember(self.__entity_key_set, entity_uuid) == 1
+
+    def delete(self, entity_uuid):
+        if self.__redis_client.sismember(self.__entity_key_set, entity_uuid) == 0: return
+        self.__redis_client.srem(self.__entity_key_set, entity_uuid)
+        self.__redis_client.lrem(self.__entity_key_list, 1, entity_uuid)
diff --git a/src/common/database/redis/context_api/tools/Mutex.py b/src/common/database/redis/context_api/tools/Mutex.py
new file mode 100644
index 0000000000000000000000000000000000000000..40d3b8afa1cb70e03bca0b007c2ac0d6535de1fd
--- /dev/null
+++ b/src/common/database/redis/context_api/tools/Mutex.py
@@ -0,0 +1,121 @@
+import random, time, uuid
+from typing import Set, Union
+from redis.client import Redis
+
+KEY_LOCK = '{}/lock'
+MIN_WAIT_TIME = 0.01
+
+class Mutex:
+    def __init__(self, redis_client: Redis) -> None:
+        if not isinstance(redis_client, Redis):
+            raise AttributeError('redis_client must be an instance of redis.client.Redis')
+        self.redis_client = redis_client
+        self.script_release = None
+        self.script_refresh_expire = None
+        self.__register_scripts()
+
+    def __register_scripts(self):
+        # Script mutex_release
+        #   Description: atomic script to release a set of mutex keys, only if all mutex keys are owned by the caller.
+        #                if owner_key matches key stored in all mutexes, remove all mutexes and return 1. if some key
+        #                does not match, do nothing and return 0.
+        #   Keys: set of entity_keys to be released
+        #   Args: owner_key
+        #   Ret : 1 if all keys have been released, 0 otherwise (no action performed)
+        #   Use : acquired = (int(self.script_release(keys=['mutex1', 'mutex2'], args=[owner_key])) == 1)
+        self.script_release = self.redis_client.register_script('\n'.join([
+            "for _,key in ipairs(KEYS) do",
+            "    local owner_key = redis.call('get', key)",
+            "    if owner_key ~= ARGV[1] then return 0 end",
+            "end",
+            "for _,key in ipairs(KEYS) do",
+            "    redis.call('del', key)",
+            "end",
+            "return 1",
+        ]))
+
+        # Script mutex_refresh_expire
+        #   Description: atomic script to refresh expiracy of a set of mutex keys, only if all of them are owned by the
+        #                caller. if owner_key matches key stored in all mutexes, refresh expiracy on all mutexes and
+        #                return 1. if some key does not match, do nothing and return 0.
+        #   Keys: set of entity_keys to be refreshed
+        #   Args: owner_key, expiracy_seconds
+        #   Ret : 1 if all keys have been refreshed, 0 otherwise (no action performed)
+        #   Use : done = (int(self.script_refresh_expire(keys=['mutex1', 'mutex2'], args=[owner_key, seconds])) == 1)
+        self.script_refresh_expire = self.redis_client.register_script('\n'.join([
+            "for _,key in ipairs(KEYS) do",
+            "    local owner_key = redis.call('get', key)",
+            "    if owner_key ~= ARGV[1] then return 0 end",
+            "end",
+            "for _,key in ipairs(KEYS) do",
+            "    redis.call('expire', key, ARGV[2])",
+            "end",
+            "return 1",
+        ]))
+
+    def acquire(self, entity_key_or_keys : Union[str, Set[str]], owner_key : Union[str, None] = None,
+                blocking : bool = True, timeout : Union[float, int] = 5,
+                expiracy_seconds : Union[float, int, None] = None):
+        # Atomically set all entity_keys or none of them.
+        # entity_key_or_keys contains either a string with a specific entity key or a set with all entity keys to be
+        # set atomically.
+        # owner_key enables to specify the desired key to use to mark the mutex. When releasing, the owner_key must be
+        # correct, otherwise, the key will not be released. It can also be used to check if mutex is still owned by
+        # oneself or was lost and acquired by another party. If set to None, a random key is generated and returned
+        # together with the acquired boolean value.
+        # blocking defines wether the acquisition should be blocking, meaning that acquisition will be retired with
+        # random increments until timeout timeout is elapsed.
+        # Optionally, an expiracy_seconds period can be specified in expiracy_seconds. If mutex is not released after
+        # that period of time, the mutex will be released automatically.
+        # If mutex(es) is(are) acquired, the method returns True and the owner_key used to create the lock; otherwise,
+        # False and None owner_key are returned.
+
+        owner_key = owner_key or str(uuid.uuid4())
+        entity_keys = entity_key_or_keys if isinstance(entity_key_or_keys, set) else {str(entity_key_or_keys)}
+        entity_key_map = {KEY_LOCK.format(entity_key):owner_key for entity_key in entity_keys}
+        acquired = False
+        if blocking:
+            remaining_wait_time = timeout
+            while not acquired:
+                acquired = (self.redis_client.msetnx(entity_key_map) == 1)
+                if acquired: break
+                if remaining_wait_time < MIN_WAIT_TIME: return False, None
+                wait_time = remaining_wait_time * random.random()
+                remaining_wait_time -= wait_time
+                time.sleep(wait_time)
+        else:
+            acquired = (self.redis_client.msetnx(entity_key_map) == 1)
+
+        if not acquired: return False, None
+
+        if expiracy_seconds is not None:
+            pipeline = self.redis_client.pipeline()
+            for entity_key in entity_key_map.keys(): pipeline.expire(entity_key, expiracy_seconds)
+            pipeline.execute()
+
+        return True, owner_key
+
+    def release(self, entity_key_or_keys : Union[str, Set[str]], owner_key : str):
+        # release mutex keys only if all of them are owned by the caller
+        # return True if succeeded, False (nothing changed) otherwise
+        entity_keys = entity_key_or_keys if isinstance(entity_key_or_keys, set) else {str(entity_key_or_keys)}
+        entity_keys = {KEY_LOCK.format(entity_key) for entity_key in entity_keys}
+        return int(self.script_release(keys=list(entity_keys), args=[owner_key])) == 1
+
+    def acquired(self, entity_key : str, owner_key : str):
+        # check if a mutex is owned by the owner with owner_key
+        value = self.redis_client.get(KEY_LOCK.format(entity_key))
+        if(value is None): return(False)
+        return str(value) == owner_key
+
+    def get_ttl(self, entity_key : str):
+        # check a mutex's time to live
+        return self.redis_client.ttl(KEY_LOCK.format(entity_key))
+
+    def refresh_expiracy(self, entity_key_or_keys : Union[str, Set[str]], owner_key : str,
+                         expiracy_seconds : Union[float, int]):
+        # refresh expiracy on specified mutex keys only if all of them are owned by the caller
+        # return True if succeeded, False (nothing changed) otherwise
+        entity_keys = entity_key_or_keys if isinstance(entity_key_or_keys, set) else {str(entity_key_or_keys)}
+        entity_keys = {KEY_LOCK.format(entity_key) for entity_key in entity_keys}
+        return int(self.script_refresh_expire(keys=entity_keys, args=[owner_key, expiracy_seconds])) == 1
diff --git a/src/common/database/redis/context_api/tools/RedisTools.py b/src/common/database/redis/context_api/tools/RedisTools.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1ae7800dcfe6a58930d300ecffaaf7107e5c145
--- /dev/null
+++ b/src/common/database/redis/context_api/tools/RedisTools.py
@@ -0,0 +1,16 @@
+def dump_keys(redis_client, logger):
+    logger.info('Dump keys...')
+    keys = redis_client.keys()
+    logger.info('  keys = {}'.format(str(keys)))
+    for key_name in keys:
+        key_name = key_name.decode('UTF-8')
+        if not key_name.startswith('context'): continue
+        key_type = redis_client.type(key_name)
+        if key_type is not None: key_type = key_type.decode('UTF-8')
+        key_content = {
+            'hash'  : lambda key: {k.decode('UTF-8'):v.decode('UTF-8') for k,v in redis_client.hgetall(key).items()},
+            'list'  : lambda key: [m.decode('UTF-8') for m in redis_client.lrange(key, 0, -1)],
+            'set'   : lambda key: {m.decode('UTF-8') for m in redis_client.smembers(key)},
+            'string': lambda key: redis_client.get(key).decode('UTF-8'),
+        }.get(key_type, lambda key: 'UNSUPPORTED_TYPE')
+        logger.info('  key {} {}: {}'.format(key_type, key_name, key_content(key_name)))
diff --git a/src/common/database/redis/context_api/tools/_Entity.py b/src/common/database/redis/context_api/tools/_Entity.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab806f796fa44939485ab3092f593be8d373a7ee
--- /dev/null
+++ b/src/common/database/redis/context_api/tools/_Entity.py
@@ -0,0 +1,25 @@
+from typing import Dict
+
+class _Entity:
+    def __init__(self, entity_uuid, parent=None):
+        if (parent is None) or (not isinstance(parent, _Entity)):
+            raise AttributeError('parent must be an instance of _Entity')
+        self._entity_uuid = entity_uuid
+        self._parent = parent
+        self._context = self._parent.get_context()
+
+    def get_uuid(self): return(self._entity_uuid)
+    def get_parent(self): return(self._parent)
+    def get_context(self): return(self._context)
+
+    def load(self):
+        raise NotImplementedError()
+
+    def create(self):
+        raise NotImplementedError()
+
+    def delete(self):
+        raise NotImplementedError()
+
+    def dump(self) -> Dict:
+        raise NotImplementedError()
diff --git a/src/common/database/redis/context_api/tools/__init__.py b/src/common/database/redis/context_api/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/device/.gitlab-ci.yml b/src/device/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4a36939322e9bc774cd14fb0ade1c46947b71cdd
--- /dev/null
+++ b/src/device/.gitlab-ci.yml
@@ -0,0 +1,100 @@
+variables:
+  IMAGE_NAME: 'context' # name of the microservice
+  IMAGE_NAME_TEST: 'context-test' # name of the microservice
+  IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+
+# build the Docker image
+build context:
+  stage: build
+  script:
+    - docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile ./src/
+  rules:
+    - changes:
+      - src/$IMAGE_NAME/**
+      - .gitlab-ci.yml
+
+# tags the Docker image
+tag context:
+  stage: build
+  script:
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  rules:
+    - changes:
+      - src/$IMAGE_NAME/**
+      - .gitlab-ci.yml
+
+# push the Docker image to the gitlab Docker registry
+push context:
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  rules:
+    - changes:
+      - src/$IMAGE_NAME/**
+      - .gitlab-ci.yml
+
+
+# test if the Docker image can be pulled from the gitlab registry
+test context pull:
+  stage: test
+  needs:
+    - push context
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  rules:
+    - changes:
+      - src/$IMAGE_NAME/**
+      - .gitlab-ci.yml
+
+# test if the Docker image can be executed
+test context run:
+  stage: test
+  needs:
+    - build context
+  before_script:
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi  
+  script:
+    - docker run -d -p 1010:1010 --name context --network=teraflowbridge --rm "$IMAGE_NAME:$IMAGE_TAG"
+    - docker ps > deploy_test_report.txt
+  after_script:
+    - docker stop context
+  rules:
+    - changes:
+      - src/$IMAGE_NAME/**
+      - .gitlab-ci.yml
+  artifacts:
+    when: always
+    paths:
+      - deploy_test_report.txt
+    expire_in: 1 day
+
+# apply unit test to the context component
+test context pytest:
+  stage: test
+  needs:
+    - build context
+  script:
+    - docker build -t "$IMAGE_NAME_TEST:$IMAGE_TAG" -f ./src/$IMAGE_NAME/tests/Dockerfile ./src/ > pytest_report.txt
+  rules:
+    - changes:
+      - src/$IMAGE_NAME/**
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      paths:
+        - pytest_report.txt
+      expire_in: 1 day
+
+# Deployment of the monitoring service in Kubernetes Cluster
+deploy context:
+  stage: deploy
+  needs:
+    - build context
+    - test context run
+  script:
+    - kubectl apply -f "manisfests/contextservice.yaml"
+  when: manual
diff --git a/src/device/Config.py b/src/device/Config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f2114d6195e5979ad5db736748616091c087a04
--- /dev/null
+++ b/src/device/Config.py
@@ -0,0 +1,10 @@
+import logging
+
+# gRPC settings
+SERVICE_PORT = 7070
+MAX_WORKERS  = 10
+GRACE_PERIOD = 60
+LOG_LEVEL    = logging.WARNING
+
+# Prometheus settings
+METRICS_PORT = 8080
diff --git a/src/device/Dockerfile b/src/device/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..52eb5c82cce99ebc488ae586fd29375dba709a09
--- /dev/null
+++ b/src/device/Dockerfile
@@ -0,0 +1,35 @@
+FROM python:3-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip setuptools wheel pip-tools
+
+# Set working directory
+WORKDIR /var/teraflow
+
+# Create module sub-folders
+RUN mkdir -p /var/teraflow/device
+
+# Get Python packages per module
+COPY device/requirements.in device/requirements.in
+RUN pip-compile --output-file=device/requirements.txt device/requirements.in
+RUN python3 -m pip install -r device/requirements.in
+
+# Add files into working directory
+COPY common/. common
+COPY device/. device
+
+# Start device service
+ENTRYPOINT ["python", "-m", "device.service"]
diff --git a/src/device/__init__.py b/src/device/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/device/client/DeviceClient.py b/src/device/client/DeviceClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..30b6a53f54c067e5eed8d31f8ca3cf5720152db1
--- /dev/null
+++ b/src/device/client/DeviceClient.py
@@ -0,0 +1,53 @@
+import grpc, logging
+from google.protobuf.json_format import MessageToDict
+from common.tools.RetryDecorator import retry, delay_exponential
+from device.proto.device_pb2_grpc import DeviceServiceStub
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+
+class DeviceClient:
+    def __init__(self, address, port):
+        self.endpoint = '{}:{}'.format(address, port)
+        LOGGER.debug('Creating channel to {}...'.format(self.endpoint))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = DeviceServiceStub(self.channel)
+
+    def close(self):
+        if(self.channel is not None): self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @retry(exceptions=set(), max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+    def AddDevice(self, request):
+        LOGGER.debug('AddDevice request: {}'.format(request))
+        response = self.stub.AddDevice(request)
+        LOGGER.debug('AddDevice result: {}'.format(response))
+        return MessageToDict(
+            response, including_default_value_fields=True, preserving_proto_field_name=True,
+            use_integers_for_enums=False)
+
+    @retry(exceptions=set(), max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+    def ConfigureDevice(self, request):
+        LOGGER.debug('ConfigureDevice request: {}'.format(request))
+        response = self.stub.ConfigureDevice(request)
+        LOGGER.debug('ConfigureDevice result: {}'.format(response))
+        return MessageToDict(
+            response, including_default_value_fields=True, preserving_proto_field_name=True,
+            use_integers_for_enums=False)
+
+    @retry(exceptions=set(), max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+    def DeleteDevice(self, request):
+        LOGGER.debug('DeleteDevice request: {}'.format(request))
+        response = self.stub.DeleteDevice(request)
+        LOGGER.debug('DeleteDevice result: {}'.format(response))
+        return MessageToDict(
+            response, including_default_value_fields=True, preserving_proto_field_name=True,
+            use_integers_for_enums=False)
diff --git a/src/device/client/__init__.py b/src/device/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/device/genproto.sh b/src/device/genproto.sh
new file mode 100755
index 0000000000000000000000000000000000000000..72fc512ba9056fd5a1cc1da472f48bab7d802e1a
--- /dev/null
+++ b/src/device/genproto.sh
@@ -0,0 +1,31 @@
+#!/bin/bash -eu
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/bin/bash -e
+
+# Make folder containing the script the root folder for its execution
+cd $(dirname $0)
+
+rm -rf proto/*.py
+touch proto/__init__.py
+
+python -m grpc_tools.protoc -I../../proto --python_out=proto --grpc_python_out=proto context.proto
+python -m grpc_tools.protoc -I../../proto --python_out=proto --grpc_python_out=proto device.proto
+
+sed -i -E 's/(import\ .*)_pb2/from device.proto \1_pb2/g' proto/context_pb2.py
+sed -i -E 's/(import\ .*)_pb2/from device.proto \1_pb2/g' proto/context_pb2_grpc.py
+sed -i -E 's/(import\ .*)_pb2/from device.proto \1_pb2/g' proto/device_pb2.py
+sed -i -E 's/(import\ .*)_pb2/from device.proto \1_pb2/g' proto/device_pb2_grpc.py
diff --git a/src/device/proto/__init__.py b/src/device/proto/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/device/proto/context_pb2.py b/src/device/proto/context_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4acb11a579694017d1ee5572f1f94848731802a
--- /dev/null
+++ b/src/device/proto/context_pb2.py
@@ -0,0 +1,805 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: context.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='context.proto',
+  package='context',
+  syntax='proto3',
+  serialized_options=None,
+  create_key=_descriptor._internal_create_key,
+  serialized_pb=b'\n\rcontext.proto\x12\x07\x63ontext\"\x07\n\x05\x45mpty\"{\n\x07\x43ontext\x12%\n\tcontextId\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12\x1f\n\x04topo\x18\x02 \x01(\x0b\x32\x11.context.Topology\x12(\n\x03\x63tl\x18\x03 \x01(\x0b\x32\x1b.context.TeraFlowController\"/\n\tContextId\x12\"\n\x0b\x63ontextUuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\"m\n\x08Topology\x12#\n\x06topoId\x18\x02 \x01(\x0b\x32\x13.context.TopologyId\x12\x1f\n\x06\x64\x65vice\x18\x03 \x03(\x0b\x32\x0f.context.Device\x12\x1b\n\x04link\x18\x04 \x03(\x0b\x32\r.context.Link\"1\n\x04Link\x12)\n\x0c\x65ndpointList\x18\x01 \x03(\x0b\x32\x13.context.EndPointId\"R\n\nTopologyId\x12%\n\tcontextId\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12\x1d\n\x06topoId\x18\x02 \x01(\x0b\x32\r.context.Uuid\"?\n\nConstraint\x12\x17\n\x0f\x63onstraint_type\x18\x01 \x01(\t\x12\x18\n\x10\x63onstraint_value\x18\x02 \x01(\t\"\xda\x01\n\x06\x44\x65vice\x12$\n\tdevice_id\x18\x01 \x01(\x0b\x32\x11.context.DeviceId\x12\x13\n\x0b\x64\x65vice_type\x18\x02 \x01(\t\x12,\n\rdevice_config\x18\x03 \x01(\x0b\x32\x15.context.DeviceConfig\x12>\n\x14\x64\x65vOperationalStatus\x18\x04 \x01(\x0e\x32 .context.DeviceOperationalStatus\x12\'\n\x0c\x65ndpointList\x18\x05 \x03(\x0b\x32\x11.context.EndPoint\"%\n\x0c\x44\x65viceConfig\x12\x15\n\rdevice_config\x18\x01 \x01(\t\"C\n\x08\x45ndPoint\x12$\n\x07port_id\x18\x01 \x01(\x0b\x32\x13.context.EndPointId\x12\x11\n\tport_type\x18\x02 \x01(\t\"t\n\nEndPointId\x12#\n\x06topoId\x18\x01 \x01(\x0b\x32\x13.context.TopologyId\x12!\n\x06\x64\x65v_id\x18\x02 \x01(\x0b\x32\x11.context.DeviceId\x12\x1e\n\x07port_id\x18\x03 \x01(\x0b\x32\r.context.Uuid\",\n\x08\x44\x65viceId\x12 \n\tdevice_id\x18\x01 \x01(\x0b\x32\r.context.Uuid\"\x14\n\x04Uuid\x12\x0c\n\x04uuid\x18\x01 \x01(\t\"K\n\x12TeraFlowController\x12\"\n\x06\x63tl_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12\x11\n\tipaddress\x18\x02 \x01(\t\"Q\n\x14\x41uthenticationResult\x12\"\n\x06\x63tl_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12\x15\n\rauthenticated\x18\x02 \x01(\x08*4\n\x17\x44\x65viceOperationalStatus\x12\x0c\n\x08\x44ISABLED\x10\x00\x12\x0b\n\x07\x45NABLED\x10\x01\x32\x44\n\x0e\x43ontextService\x12\x32\n\x0bGetTopology\x12\x0e.context.Empty\x1a\x11.context.Topology\"\x00\x62\x06proto3'
+)
+
+_DEVICEOPERATIONALSTATUS = _descriptor.EnumDescriptor(
+  name='DeviceOperationalStatus',
+  full_name='context.DeviceOperationalStatus',
+  filename=None,
+  file=DESCRIPTOR,
+  create_key=_descriptor._internal_create_key,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='DISABLED', index=0, number=0,
+      serialized_options=None,
+      type=None,
+      create_key=_descriptor._internal_create_key),
+    _descriptor.EnumValueDescriptor(
+      name='ENABLED', index=1, number=1,
+      serialized_options=None,
+      type=None,
+      create_key=_descriptor._internal_create_key),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=1195,
+  serialized_end=1247,
+)
+_sym_db.RegisterEnumDescriptor(_DEVICEOPERATIONALSTATUS)
+
+DeviceOperationalStatus = enum_type_wrapper.EnumTypeWrapper(_DEVICEOPERATIONALSTATUS)
+DISABLED = 0
+ENABLED = 1
+
+
+
+_EMPTY = _descriptor.Descriptor(
+  name='Empty',
+  full_name='context.Empty',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26,
+  serialized_end=33,
+)
+
+
+_CONTEXT = _descriptor.Descriptor(
+  name='Context',
+  full_name='context.Context',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='contextId', full_name='context.Context.contextId', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='topo', full_name='context.Context.topo', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='ctl', full_name='context.Context.ctl', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=35,
+  serialized_end=158,
+)
+
+
+_CONTEXTID = _descriptor.Descriptor(
+  name='ContextId',
+  full_name='context.ContextId',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='contextUuid', full_name='context.ContextId.contextUuid', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=160,
+  serialized_end=207,
+)
+
+
+_TOPOLOGY = _descriptor.Descriptor(
+  name='Topology',
+  full_name='context.Topology',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='topoId', full_name='context.Topology.topoId', index=0,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='device', full_name='context.Topology.device', index=1,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='link', full_name='context.Topology.link', index=2,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=209,
+  serialized_end=318,
+)
+
+
+_LINK = _descriptor.Descriptor(
+  name='Link',
+  full_name='context.Link',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='endpointList', full_name='context.Link.endpointList', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=320,
+  serialized_end=369,
+)
+
+
+_TOPOLOGYID = _descriptor.Descriptor(
+  name='TopologyId',
+  full_name='context.TopologyId',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='contextId', full_name='context.TopologyId.contextId', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='topoId', full_name='context.TopologyId.topoId', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=371,
+  serialized_end=453,
+)
+
+
+_CONSTRAINT = _descriptor.Descriptor(
+  name='Constraint',
+  full_name='context.Constraint',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='constraint_type', full_name='context.Constraint.constraint_type', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='constraint_value', full_name='context.Constraint.constraint_value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=455,
+  serialized_end=518,
+)
+
+
+_DEVICE = _descriptor.Descriptor(
+  name='Device',
+  full_name='context.Device',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='device_id', full_name='context.Device.device_id', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='device_type', full_name='context.Device.device_type', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='device_config', full_name='context.Device.device_config', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='devOperationalStatus', full_name='context.Device.devOperationalStatus', index=3,
+      number=4, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='endpointList', full_name='context.Device.endpointList', index=4,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=521,
+  serialized_end=739,
+)
+
+
+_DEVICECONFIG = _descriptor.Descriptor(
+  name='DeviceConfig',
+  full_name='context.DeviceConfig',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='device_config', full_name='context.DeviceConfig.device_config', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=741,
+  serialized_end=778,
+)
+
+
+_ENDPOINT = _descriptor.Descriptor(
+  name='EndPoint',
+  full_name='context.EndPoint',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='port_id', full_name='context.EndPoint.port_id', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='port_type', full_name='context.EndPoint.port_type', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=780,
+  serialized_end=847,
+)
+
+
+_ENDPOINTID = _descriptor.Descriptor(
+  name='EndPointId',
+  full_name='context.EndPointId',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='topoId', full_name='context.EndPointId.topoId', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='dev_id', full_name='context.EndPointId.dev_id', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='port_id', full_name='context.EndPointId.port_id', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=849,
+  serialized_end=965,
+)
+
+
+_DEVICEID = _descriptor.Descriptor(
+  name='DeviceId',
+  full_name='context.DeviceId',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='device_id', full_name='context.DeviceId.device_id', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=967,
+  serialized_end=1011,
+)
+
+
+_UUID = _descriptor.Descriptor(
+  name='Uuid',
+  full_name='context.Uuid',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='uuid', full_name='context.Uuid.uuid', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1013,
+  serialized_end=1033,
+)
+
+
+_TERAFLOWCONTROLLER = _descriptor.Descriptor(
+  name='TeraFlowController',
+  full_name='context.TeraFlowController',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='ctl_id', full_name='context.TeraFlowController.ctl_id', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='ipaddress', full_name='context.TeraFlowController.ipaddress', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1035,
+  serialized_end=1110,
+)
+
+
+_AUTHENTICATIONRESULT = _descriptor.Descriptor(
+  name='AuthenticationResult',
+  full_name='context.AuthenticationResult',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='ctl_id', full_name='context.AuthenticationResult.ctl_id', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='authenticated', full_name='context.AuthenticationResult.authenticated', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1112,
+  serialized_end=1193,
+)
+
+_CONTEXT.fields_by_name['contextId'].message_type = _CONTEXTID
+_CONTEXT.fields_by_name['topo'].message_type = _TOPOLOGY
+_CONTEXT.fields_by_name['ctl'].message_type = _TERAFLOWCONTROLLER
+_CONTEXTID.fields_by_name['contextUuid'].message_type = _UUID
+_TOPOLOGY.fields_by_name['topoId'].message_type = _TOPOLOGYID
+_TOPOLOGY.fields_by_name['device'].message_type = _DEVICE
+_TOPOLOGY.fields_by_name['link'].message_type = _LINK
+_LINK.fields_by_name['endpointList'].message_type = _ENDPOINTID
+_TOPOLOGYID.fields_by_name['contextId'].message_type = _CONTEXTID
+_TOPOLOGYID.fields_by_name['topoId'].message_type = _UUID
+_DEVICE.fields_by_name['device_id'].message_type = _DEVICEID
+_DEVICE.fields_by_name['device_config'].message_type = _DEVICECONFIG
+_DEVICE.fields_by_name['devOperationalStatus'].enum_type = _DEVICEOPERATIONALSTATUS
+_DEVICE.fields_by_name['endpointList'].message_type = _ENDPOINT
+_ENDPOINT.fields_by_name['port_id'].message_type = _ENDPOINTID
+_ENDPOINTID.fields_by_name['topoId'].message_type = _TOPOLOGYID
+_ENDPOINTID.fields_by_name['dev_id'].message_type = _DEVICEID
+_ENDPOINTID.fields_by_name['port_id'].message_type = _UUID
+_DEVICEID.fields_by_name['device_id'].message_type = _UUID
+_TERAFLOWCONTROLLER.fields_by_name['ctl_id'].message_type = _CONTEXTID
+_AUTHENTICATIONRESULT.fields_by_name['ctl_id'].message_type = _CONTEXTID
+DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
+DESCRIPTOR.message_types_by_name['Context'] = _CONTEXT
+DESCRIPTOR.message_types_by_name['ContextId'] = _CONTEXTID
+DESCRIPTOR.message_types_by_name['Topology'] = _TOPOLOGY
+DESCRIPTOR.message_types_by_name['Link'] = _LINK
+DESCRIPTOR.message_types_by_name['TopologyId'] = _TOPOLOGYID
+DESCRIPTOR.message_types_by_name['Constraint'] = _CONSTRAINT
+DESCRIPTOR.message_types_by_name['Device'] = _DEVICE
+DESCRIPTOR.message_types_by_name['DeviceConfig'] = _DEVICECONFIG
+DESCRIPTOR.message_types_by_name['EndPoint'] = _ENDPOINT
+DESCRIPTOR.message_types_by_name['EndPointId'] = _ENDPOINTID
+DESCRIPTOR.message_types_by_name['DeviceId'] = _DEVICEID
+DESCRIPTOR.message_types_by_name['Uuid'] = _UUID
+DESCRIPTOR.message_types_by_name['TeraFlowController'] = _TERAFLOWCONTROLLER
+DESCRIPTOR.message_types_by_name['AuthenticationResult'] = _AUTHENTICATIONRESULT
+DESCRIPTOR.enum_types_by_name['DeviceOperationalStatus'] = _DEVICEOPERATIONALSTATUS
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), {
+  'DESCRIPTOR' : _EMPTY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Empty)
+  })
+_sym_db.RegisterMessage(Empty)
+
+Context = _reflection.GeneratedProtocolMessageType('Context', (_message.Message,), {
+  'DESCRIPTOR' : _CONTEXT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Context)
+  })
+_sym_db.RegisterMessage(Context)
+
+ContextId = _reflection.GeneratedProtocolMessageType('ContextId', (_message.Message,), {
+  'DESCRIPTOR' : _CONTEXTID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ContextId)
+  })
+_sym_db.RegisterMessage(ContextId)
+
+Topology = _reflection.GeneratedProtocolMessageType('Topology', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Topology)
+  })
+_sym_db.RegisterMessage(Topology)
+
+Link = _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), {
+  'DESCRIPTOR' : _LINK,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Link)
+  })
+_sym_db.RegisterMessage(Link)
+
+TopologyId = _reflection.GeneratedProtocolMessageType('TopologyId', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGYID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TopologyId)
+  })
+_sym_db.RegisterMessage(TopologyId)
+
+Constraint = _reflection.GeneratedProtocolMessageType('Constraint', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint)
+  })
+_sym_db.RegisterMessage(Constraint)
+
+Device = _reflection.GeneratedProtocolMessageType('Device', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICE,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Device)
+  })
+_sym_db.RegisterMessage(Device)
+
+DeviceConfig = _reflection.GeneratedProtocolMessageType('DeviceConfig', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICECONFIG,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceConfig)
+  })
+_sym_db.RegisterMessage(DeviceConfig)
+
+EndPoint = _reflection.GeneratedProtocolMessageType('EndPoint', (_message.Message,), {
+  'DESCRIPTOR' : _ENDPOINT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.EndPoint)
+  })
+_sym_db.RegisterMessage(EndPoint)
+
+EndPointId = _reflection.GeneratedProtocolMessageType('EndPointId', (_message.Message,), {
+  'DESCRIPTOR' : _ENDPOINTID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.EndPointId)
+  })
+_sym_db.RegisterMessage(EndPointId)
+
+DeviceId = _reflection.GeneratedProtocolMessageType('DeviceId', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICEID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceId)
+  })
+_sym_db.RegisterMessage(DeviceId)
+
+Uuid = _reflection.GeneratedProtocolMessageType('Uuid', (_message.Message,), {
+  'DESCRIPTOR' : _UUID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Uuid)
+  })
+_sym_db.RegisterMessage(Uuid)
+
+TeraFlowController = _reflection.GeneratedProtocolMessageType('TeraFlowController', (_message.Message,), {
+  'DESCRIPTOR' : _TERAFLOWCONTROLLER,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TeraFlowController)
+  })
+_sym_db.RegisterMessage(TeraFlowController)
+
+AuthenticationResult = _reflection.GeneratedProtocolMessageType('AuthenticationResult', (_message.Message,), {
+  'DESCRIPTOR' : _AUTHENTICATIONRESULT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.AuthenticationResult)
+  })
+_sym_db.RegisterMessage(AuthenticationResult)
+
+
+
+_CONTEXTSERVICE = _descriptor.ServiceDescriptor(
+  name='ContextService',
+  full_name='context.ContextService',
+  file=DESCRIPTOR,
+  index=0,
+  serialized_options=None,
+  create_key=_descriptor._internal_create_key,
+  serialized_start=1249,
+  serialized_end=1317,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='GetTopology',
+    full_name='context.ContextService.GetTopology',
+    index=0,
+    containing_service=None,
+    input_type=_EMPTY,
+    output_type=_TOPOLOGY,
+    serialized_options=None,
+    create_key=_descriptor._internal_create_key,
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_CONTEXTSERVICE)
+
+DESCRIPTOR.services_by_name['ContextService'] = _CONTEXTSERVICE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/device/proto/context_pb2_grpc.py b/src/device/proto/context_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1ff4672a33091ca7fb800aec2af49795050dd03
--- /dev/null
+++ b/src/device/proto/context_pb2_grpc.py
@@ -0,0 +1,66 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+
+from device.proto import context_pb2 as context__pb2
+
+
+class ContextServiceStub(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.GetTopology = channel.unary_unary(
+                '/context.ContextService/GetTopology',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.Topology.FromString,
+                )
+
+
+class ContextServiceServicer(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def GetTopology(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+
+def add_ContextServiceServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+            'GetTopology': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetTopology,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.Topology.SerializeToString,
+            ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+            'context.ContextService', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class ContextService(object):
+    """Missing associated documentation comment in .proto file."""
+
+    @staticmethod
+    def GetTopology(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetTopology',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.Topology.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
diff --git a/src/device/proto/device_pb2.py b/src/device/proto/device_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..d561c2a39f02e01bd6584ffa03b848e3d3c1b9c8
--- /dev/null
+++ b/src/device/proto/device_pb2.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: device.proto
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from device.proto import context_pb2 as context__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='device.proto',
+  package='device',
+  syntax='proto3',
+  serialized_options=None,
+  create_key=_descriptor._internal_create_key,
+  serialized_pb=b'\n\x0c\x64\x65vice.proto\x12\x06\x64\x65vice\x1a\rcontext.proto2\xb6\x01\n\rDeviceService\x12\x31\n\tAddDevice\x12\x0f.context.Device\x1a\x11.context.DeviceId\"\x00\x12=\n\x0f\x43onfigureDevice\x12\x15.context.DeviceConfig\x1a\x11.context.DeviceId\"\x00\x12\x33\n\x0c\x44\x65leteDevice\x12\x11.context.DeviceId\x1a\x0e.context.Empty\"\x00\x62\x06proto3'
+  ,
+  dependencies=[context__pb2.DESCRIPTOR,])
+
+
+
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+_DEVICESERVICE = _descriptor.ServiceDescriptor(
+  name='DeviceService',
+  full_name='device.DeviceService',
+  file=DESCRIPTOR,
+  index=0,
+  serialized_options=None,
+  create_key=_descriptor._internal_create_key,
+  serialized_start=40,
+  serialized_end=222,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='AddDevice',
+    full_name='device.DeviceService.AddDevice',
+    index=0,
+    containing_service=None,
+    input_type=context__pb2._DEVICE,
+    output_type=context__pb2._DEVICEID,
+    serialized_options=None,
+    create_key=_descriptor._internal_create_key,
+  ),
+  _descriptor.MethodDescriptor(
+    name='ConfigureDevice',
+    full_name='device.DeviceService.ConfigureDevice',
+    index=1,
+    containing_service=None,
+    input_type=context__pb2._DEVICECONFIG,
+    output_type=context__pb2._DEVICEID,
+    serialized_options=None,
+    create_key=_descriptor._internal_create_key,
+  ),
+  _descriptor.MethodDescriptor(
+    name='DeleteDevice',
+    full_name='device.DeviceService.DeleteDevice',
+    index=2,
+    containing_service=None,
+    input_type=context__pb2._DEVICEID,
+    output_type=context__pb2._EMPTY,
+    serialized_options=None,
+    create_key=_descriptor._internal_create_key,
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_DEVICESERVICE)
+
+DESCRIPTOR.services_by_name['DeviceService'] = _DEVICESERVICE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/device/proto/device_pb2_grpc.py b/src/device/proto/device_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd8c6bf32128f6390a0bc6a2a38a15d86758158c
--- /dev/null
+++ b/src/device/proto/device_pb2_grpc.py
@@ -0,0 +1,132 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+
+from device.proto import context_pb2 as context__pb2
+
+
+class DeviceServiceStub(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.AddDevice = channel.unary_unary(
+                '/device.DeviceService/AddDevice',
+                request_serializer=context__pb2.Device.SerializeToString,
+                response_deserializer=context__pb2.DeviceId.FromString,
+                )
+        self.ConfigureDevice = channel.unary_unary(
+                '/device.DeviceService/ConfigureDevice',
+                request_serializer=context__pb2.DeviceConfig.SerializeToString,
+                response_deserializer=context__pb2.DeviceId.FromString,
+                )
+        self.DeleteDevice = channel.unary_unary(
+                '/device.DeviceService/DeleteDevice',
+                request_serializer=context__pb2.DeviceId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+
+
+class DeviceServiceServicer(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def AddDevice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ConfigureDevice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def DeleteDevice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+
+def add_DeviceServiceServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+            'AddDevice': grpc.unary_unary_rpc_method_handler(
+                    servicer.AddDevice,
+                    request_deserializer=context__pb2.Device.FromString,
+                    response_serializer=context__pb2.DeviceId.SerializeToString,
+            ),
+            'ConfigureDevice': grpc.unary_unary_rpc_method_handler(
+                    servicer.ConfigureDevice,
+                    request_deserializer=context__pb2.DeviceConfig.FromString,
+                    response_serializer=context__pb2.DeviceId.SerializeToString,
+            ),
+            'DeleteDevice': grpc.unary_unary_rpc_method_handler(
+                    servicer.DeleteDevice,
+                    request_deserializer=context__pb2.DeviceId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+            'device.DeviceService', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class DeviceService(object):
+    """Missing associated documentation comment in .proto file."""
+
+    @staticmethod
+    def AddDevice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/device.DeviceService/AddDevice',
+            context__pb2.Device.SerializeToString,
+            context__pb2.DeviceId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ConfigureDevice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/device.DeviceService/ConfigureDevice',
+            context__pb2.DeviceConfig.SerializeToString,
+            context__pb2.DeviceId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def DeleteDevice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/device.DeviceService/DeleteDevice',
+            context__pb2.DeviceId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
diff --git a/src/device/run_integration_tests.sh b/src/device/run_integration_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..616397a898bc49960eeb9988b526968625e6f904
--- /dev/null
+++ b/src/device/run_integration_tests.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Make folder containing the script the root folder for its execution
+cd $(dirname $0)
+
+ENDPOINT=($(kubectl --namespace teraflow-development get service deviceservice -o 'jsonpath={.spec.clusterIP} {.spec.ports[?(@.name=="grpc")].port}'))
+docker run -it --env TEST_TARGET_ADDRESS=${ENDPOINT[0]} --env TEST_TARGET_PORT=${ENDPOINT[1]} device_service:test
diff --git a/src/device/run_unitary_tests.sh b/src/device/run_unitary_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..08e941f31502fe8dc32ffcfc1563c2223bb4d8d3
--- /dev/null
+++ b/src/device/run_unitary_tests.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Make folder containing the script the root folder for its execution
+cd $(dirname $0)
+
+mkdir -p data
+pytest -v --log-level=DEBUG tests/test_unitary.py
diff --git a/src/device/service/DeviceService.py b/src/device/service/DeviceService.py
new file mode 100644
index 0000000000000000000000000000000000000000..11cc13d47b249760c20d1e7bf26fc65e7160ce1b
--- /dev/null
+++ b/src/device/service/DeviceService.py
@@ -0,0 +1,55 @@
+import grpc
+import logging
+from concurrent import futures
+from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
+from grpc_health.v1.health_pb2 import HealthCheckResponse
+from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
+from device.proto.device_pb2_grpc import add_DeviceServiceServicer_to_server
+from device.service.DeviceServiceServicerImpl import DeviceServiceServicerImpl
+from device.Config import SERVICE_PORT, MAX_WORKERS, GRACE_PERIOD
+
+BIND_ADDRESS = '0.0.0.0'
+LOGGER = logging.getLogger(__name__)
+
+class DeviceService:
+    def __init__(self, database, address=BIND_ADDRESS, port=SERVICE_PORT, max_workers=MAX_WORKERS,
+                 grace_period=GRACE_PERIOD):
+        self.database = database
+        self.address = address
+        self.port = port
+        self.endpoint = None
+        self.max_workers = max_workers
+        self.grace_period = grace_period
+        self.device_servicer = None
+        self.health_servicer = None
+        self.pool = None
+        self.server = None
+
+    def start(self):
+        self.endpoint = '{}:{}'.format(self.address, self.port)
+        LOGGER.debug('Starting Service (tentative endpoint: {}, max_workers: {})...'.format(
+            self.endpoint, self.max_workers))
+
+        self.pool = futures.ThreadPoolExecutor(max_workers=self.max_workers)
+        self.server = grpc.server(self.pool) # , interceptors=(tracer_interceptor,))
+
+        self.device_servicer = DeviceServiceServicerImpl(self.database)
+        add_DeviceServiceServicer_to_server(self.device_servicer, self.server)
+
+        self.health_servicer = HealthServicer(
+            experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
+        add_HealthServicer_to_server(self.health_servicer, self.server)
+
+        port = self.server.add_insecure_port(self.endpoint)
+        self.endpoint = '{}:{}'.format(self.address, port)
+        LOGGER.info('Listening on {}...'.format(self.endpoint))
+        self.server.start()
+        self.health_servicer.set(OVERALL_HEALTH, HealthCheckResponse.SERVING) # pylint: disable=maybe-no-member
+
+        LOGGER.debug('Service started')
+
+    def stop(self):
+        LOGGER.debug('Stopping service (grace period {} seconds)...'.format(self.grace_period))
+        self.health_servicer.enter_graceful_shutdown()
+        self.server.stop(self.grace_period)
+        LOGGER.debug('Service stopped')
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc946bc972c9e01409e4d3cd178b754100cec5c7
--- /dev/null
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -0,0 +1,88 @@
+import grpc, logging
+from prometheus_client import Counter, Histogram
+from google.protobuf.json_format import MessageToDict
+from context.proto.context_pb2 import DeviceId, Empty
+from device.proto.device_pb2_grpc import DeviceServiceServicer
+
+LOGGER = logging.getLogger(__name__)
+
+ADDDEVICE_COUNTER_STARTED    = Counter  ('device_adddevice_counter_started',
+                                          'Device:AddDevice counter of requests started'  )
+ADDDEVICE_COUNTER_COMPLETED  = Counter  ('device_adddevice_counter_completed',
+                                          'Device:AddDevice counter of requests completed')
+ADDDEVICE_COUNTER_FAILED     = Counter  ('device_adddevice_counter_failed',
+                                          'Device:AddDevice counter of requests failed'   )
+ADDDEVICE_HISTOGRAM_DURATION = Histogram('device_adddevice_histogram_duration',
+                                          'Device:AddDevice histogram of request duration')
+
+CONFIGUREDEVICE_COUNTER_STARTED    = Counter  ('device_configuredevice_counter_started',
+                                               'Device:ConfigureDevice counter of requests started'  )
+CONFIGUREDEVICE_COUNTER_COMPLETED  = Counter  ('device_configuredevice_counter_completed',
+                                               'Device:ConfigureDevice counter of requests completed')
+CONFIGUREDEVICE_COUNTER_FAILED     = Counter  ('device_configuredevice_counter_failed',
+                                               'Device:ConfigureDevice counter of requests failed'   )
+CONFIGUREDEVICE_HISTOGRAM_DURATION = Histogram('device_configuredevice_histogram_duration',
+                                               'Device:ConfigureDevice histogram of request duration')
+
+DELETEDEVICE_COUNTER_STARTED    = Counter  ('device_deletedevice_counter_started',
+                                            'Device:DeleteDevice counter of requests started'  )
+DELETEDEVICE_COUNTER_COMPLETED  = Counter  ('device_deletedevice_counter_completed',
+                                            'Device:DeleteDevice counter of requests completed')
+DELETEDEVICE_COUNTER_FAILED     = Counter  ('device_deletedevice_counter_failed',
+                                            'Device:DeleteDevice counter of requests failed'   )
+DELETEDEVICE_HISTOGRAM_DURATION = Histogram('device_deletedevice_histogram_duration',
+                                            'Device:DeleteDevice histogram of request duration')
+
+class DeviceServiceServicerImpl(DeviceServiceServicer):
+    def __init__(self, database):
+        LOGGER.debug('Creating Servicer...')
+        self.database = database
+        LOGGER.debug('Servicer Created')
+
+    @ADDDEVICE_HISTOGRAM_DURATION.time()
+    def AddDevice(self, request, context):
+        # request=context.Device(), returns=context.DeviceId()
+        ADDDEVICE_COUNTER_STARTED.inc()
+        try:
+            LOGGER.info('AddDevice request: {}'.format(str(request)))
+            reply = DeviceId(**self.database.add_device(MessageToDict(request)))
+            LOGGER.info('AddDevice reply: {}'.format(str(reply)))
+            ADDDEVICE_COUNTER_COMPLETED.inc()
+            return reply
+        except:
+            LOGGER.exception('AddDevice exception')
+            ADDDEVICE_COUNTER_FAILED.inc()
+            context.set_code(grpc.StatusCode.INTERNAL)
+            return DeviceId()
+
+    @CONFIGUREDEVICE_HISTOGRAM_DURATION.time()
+    def ConfigureDevice(self, request, context):
+        # request=context.DeviceConfig(), returns=context.DeviceId()
+        CONFIGUREDEVICE_COUNTER_STARTED.inc()
+        try:
+            LOGGER.info('ConfigureDevice request: {}'.format(str(request)))
+            reply = DeviceId(**self.database.configure_device(MessageToDict(request)))
+            LOGGER.info('ConfigureDevice reply: {}'.format(str(reply)))
+            CONFIGUREDEVICE_COUNTER_COMPLETED.inc()
+            return reply
+        except:
+            LOGGER.exception('ConfigureDevice exception')
+            CONFIGUREDEVICE_COUNTER_FAILED.inc()
+            context.set_code(grpc.StatusCode.INTERNAL)
+            return DeviceId()
+
+    @DELETEDEVICE_HISTOGRAM_DURATION.time()
+    def DeleteDevice(self, request, context):
+        # request=context.DeviceId(), returns=context.Empty()
+        DELETEDEVICE_COUNTER_STARTED.inc()
+        try:
+            LOGGER.info('DeleteDevice request: {}'.format(str(request)))
+            reply = Empty(**self.database.delete_device(MessageToDict(request)))
+            LOGGER.info('DeleteDevice reply: {}'.format(str(reply)))
+            DELETEDEVICE_COUNTER_COMPLETED.inc()
+            return reply
+        except:
+            LOGGER.exception('DeleteDevice exception')
+            DELETEDEVICE_COUNTER_FAILED.inc()
+            context.set_code(grpc.StatusCode.INTERNAL)
+            return Empty()
diff --git a/src/device/service/__init__.py b/src/device/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/device/service/__main__.py b/src/device/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..81791277e88aa7a4e96ab7f69de43f7011995ca5
--- /dev/null
+++ b/src/device/service/__main__.py
@@ -0,0 +1,52 @@
+import logging, os, signal, sys, threading
+from prometheus_client import start_http_server
+from device.database.Factory import get_database
+from device.service.DeviceService import DeviceService
+from device.Config import SERVICE_PORT, MAX_WORKERS, GRACE_PERIOD, LOG_LEVEL, METRICS_PORT
+
+terminate = threading.Event()
+logger = None
+
+def signal_handler(signal, frame):
+    global terminate, logger
+    logger.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global terminate, logger
+
+    service_port = os.environ.get('DEVICESERVICE_SERVICE_PORT_GRPC', SERVICE_PORT)
+    max_workers  = os.environ.get('MAX_WORKERS',  MAX_WORKERS )
+    grace_period = os.environ.get('GRACE_PERIOD', GRACE_PERIOD)
+    log_level    = os.environ.get('LOG_LEVEL',    LOG_LEVEL   )
+    metrics_port = os.environ.get('METRICS_PORT', METRICS_PORT)
+
+    logging.basicConfig(level=log_level)
+    logger = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    logger.info('Starting...')
+
+    # Start metrics server
+    start_http_server(metrics_port)
+
+    # Get database instance
+    database = get_database()
+
+    # Starting device service
+    service = DeviceService(database, port=service_port, max_workers=max_workers, grace_period=grace_period)
+    service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(0.1): pass
+
+    logger.info('Terminating...')
+    service.stop()
+
+    logger.info('Bye')
+    return(0)
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/device/tests/Dockerfile b/src/device/tests/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..b1f6411c39007897e3d0bc0eb7506545be57c524
--- /dev/null
+++ b/src/device/tests/Dockerfile
@@ -0,0 +1,4 @@
+FROM device_service:develop
+
+# Run integration tests
+ENTRYPOINT ["pytest", "-v", "--log-level=DEBUG", "device/tests/test_integration.py"]
diff --git a/src/device/tests/__init__.py b/src/device/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/device/tests/test_integration.py b/src/device/tests/test_integration.py
new file mode 100644
index 0000000000000000000000000000000000000000..eab068b493a06754ec335ea118fa60e671fddec7
--- /dev/null
+++ b/src/device/tests/test_integration.py
@@ -0,0 +1,24 @@
+import logging, os, pytest, sys
+
+from pathlib import Path
+sys.path.append(__file__.split('src')[0] + 'src')
+print(sys.path)
+
+from context.client.ContextClient import ContextClient
+from context.proto.context_pb2 import Empty
+from .tools.ValidateTopology import validate_topology_dict
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def remote_context_client():
+    address = os.environ.get('TEST_TARGET_ADDRESS')
+    if(address is None): raise Exception('EnvironmentVariable(TEST_TARGET_ADDRESS) not specified')
+    port = os.environ.get('TEST_TARGET_PORT')
+    if(port is None): raise Exception('EnvironmentVariable(TEST_TARGET_PORT) not specified')
+    return ContextClient(address=address, port=port)
+
+def test_remote_get_topology(remote_context_client):
+    response = remote_context_client.GetTopology(Empty())
+    validate_topology_dict(response)
diff --git a/src/device/tests/test_unitary.py b/src/device/tests/test_unitary.py
new file mode 100644
index 0000000000000000000000000000000000000000..61e580ea704260cd034273e2bd74ae9fbbd606e6
--- /dev/null
+++ b/src/device/tests/test_unitary.py
@@ -0,0 +1,31 @@
+import logging, pytest, sys
+
+from pathlib import Path
+sys.path.append(__file__.split('src')[0] + 'src')
+print(sys.path)
+
+from context.client.ContextClient import ContextClient
+from context.database.Factory import get_database, DatabaseEngineEnum
+from context.proto.context_pb2 import Empty
+from context.service.ContextService import ContextService
+from context.Config import SERVICE_PORT, MAX_WORKERS, GRACE_PERIOD
+from context.tests.tools.ValidateTopology import validate_topology_dict
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def local_context_service():
+    database = get_database(engine=DatabaseEngineEnum.INMEMORY, filepath='data/topo_nsfnet.json')
+    _service = ContextService(database, port=SERVICE_PORT, max_workers=MAX_WORKERS, grace_period=GRACE_PERIOD)
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def local_context_client(local_context_service):
+    return ContextClient(address='127.0.0.1', port=SERVICE_PORT)
+
+def test_local_get_topology(local_context_client):
+    response = local_context_client.GetTopology(Empty())
+    validate_topology_dict(response)
diff --git a/src/device/tests/tools/ValidateTopology.py b/src/device/tests/tools/ValidateTopology.py
new file mode 100644
index 0000000000000000000000000000000000000000..b52546e39c27292bec4f11755dade987929e5e71
--- /dev/null
+++ b/src/device/tests/tools/ValidateTopology.py
@@ -0,0 +1,6 @@
+def validate_topology_dict(topology):
+    assert type(topology) is dict
+    assert len(topology.keys()) > 0
+    assert 'topoId' in topology
+    assert 'device' in topology
+    assert 'link' in topology
diff --git a/src/device/tests/tools/__init__.py b/src/device/tests/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391