diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 3fdbe77fb502c42aaf7dd507ab239f6b3bb20056..e0d75dd287a447b348f8b47e22b6dc4570385f0c 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -212,24 +212,24 @@ for COMPONENT in $TFS_COMPONENTS; do
         BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
 
         if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
-            $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+            $DOCKER_BUILD --network=host -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
         elif [ "$COMPONENT" == "pathcomp" ]; then
             BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
-            $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
+            $DOCKER_BUILD --network=host -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
 
             BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
-            $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
+            $DOCKER_BUILD --network=host -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
             # next command is redundant, but helpful to keep cache updated between rebuilds
             IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
-            $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+            $DOCKER_BUILD --network=host -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
         elif [ "$COMPONENT" == "dlt" ]; then
             BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
-            $DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
+            $DOCKER_BUILD --network=host -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
 
             BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
-            $DOCKER_BUILD -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
+            $DOCKER_BUILD --network=host -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
         else
-            $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+            $DOCKER_BUILD --network=host -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
         fi
 
         echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
diff --git a/my_deploy.sh b/my_deploy.sh
index 8417f6eae510391e65d5f91202e59cccf32e1f98..3c8cc7ff3d5d0735c961a236f5cfefcef388c88d 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -20,7 +20,9 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice nbi webui app"
+
+# export load_generator
 
 # Uncomment to activate Monitoring
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
@@ -109,7 +111,7 @@ export CRDB_DEPLOY_MODE="single"
 export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
-export CRDB_REDEPLOY=""
+export CRDB_REDEPLOY="YES"
 
 
 # ----- NATS -------------------------------------------------------------------
diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py
index 9ed321d5328aa17a856a3a6401bc35576eef679f..23ebe19d681bd0ba774c8f3f4435c233369d0e28 100644
--- a/src/common/DeviceTypes.py
+++ b/src/common/DeviceTypes.py
@@ -47,6 +47,7 @@ class DeviceTypeEnum(Enum):
     PACKET_ROUTER                   = 'packet-router'
     PACKET_SWITCH                   = 'packet-switch'
     XR_CONSTELLATION                = 'xr-constellation'
+    QKD_NODE                        = 'qkd-node'
 
     # ETSI TeraFlowSDN controller
     TERAFLOWSDN_CONTROLLER          = 'teraflowsdn'
diff --git a/src/context/proto b/src/context/proto
new file mode 120000
index 0000000000000000000000000000000000000000..0ae252a7824cad03d85fa60224b87d8c779f1588
--- /dev/null
+++ b/src/context/proto
@@ -0,0 +1 @@
+../../proto/src/python
\ No newline at end of file
diff --git a/src/device/service/driver_api/driver_api/qkd_api.py b/src/device/service/driver_api/driver_api/qkd_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..27b4718f068b256e943a85b492128226352dcb06
--- /dev/null
+++ b/src/device/service/driver_api/driver_api/qkd_api.py
@@ -0,0 +1,246 @@
+import sys
+import os
+import grpc
+import json
+from flask import Flask, jsonify, request, abort
+from common.proto.context_pb2 import Empty, ContextId, TopologyId, Uuid
+from common.proto.context_pb2_grpc import ContextServiceStub
+from device.service.driver_api.DriverInstanceCache import DriverInstanceCache, get_driver
+from device.service.driver_api.DriverFactory import DriverFactory
+from device.service.driver_api.FilterFields import FilterFieldEnum
+from device.service.driver_api._Driver import _Driver
+from device.service.drivers.qkd.QKDDriver2 import QKDDriver  
+app = Flask(__name__)
+
+# Add the directory containing the driver_api module to the system path
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
+
+# Initialize the DriverFactory and DriverInstanceCache
+drivers_list = [
+    (QKDDriver, [{'filter_field1': 'value1', 'filter_field2': 'value2'}])
+]
+
+driver_factory = DriverFactory(drivers_list)
+driver_instance_cache = DriverInstanceCache(driver_factory)
+
+
+def get_context_topology_info():
+    try:
+        # Establish a gRPC channel
+        channel = grpc.insecure_channel('10.152.183.77:1010')  # Update with the correct IP and port
+        stub = ContextServiceStub(channel)
+
+        # Retrieve the context information
+        context_list = stub.ListContexts(Empty())
+        contexts_info = []
+        for context in context_list.contexts:
+            context_info = {
+                'context_id': context.context_id.context_uuid.uuid,
+                'context_name': context.name,
+                'topologies': []
+            }
+
+            # Retrieve topology information for each context
+            topology_list = stub.ListTopologies(context.context_id)
+            for topology in topology_list.topologies:
+                topology_info = {
+                    'topology_id': topology.topology_id.topology_uuid.uuid,
+                    'topology_name': topology.name,
+                    'devices': []
+                }
+
+                # Retrieve detailed topology information
+                topology_details = stub.GetTopologyDetails(topology.topology_id)
+                for device in topology_details.devices:
+                    device_info = {
+                        'device_id': device.device_id.device_uuid.uuid,
+                        'device_name': device.name,
+                        'device_type': device.device_type,
+                        'status': device.device_operational_status,
+                        'drivers': [driver for driver in device.device_drivers],
+                        'endpoints': [{
+                            'uuid': endpoint.endpoint_id.endpoint_uuid.uuid,
+                            'name': endpoint.name,
+                            'type': endpoint.endpoint_type,
+                            'location': endpoint.endpoint_location
+                        } for endpoint in device.device_endpoints],
+                        'configurations': [{
+                            'key': config.custom.resource_key,
+                            'value': config.custom.resource_value
+                        } for config in device.device_config.config_rules],
+                        'interfaces': [{
+                            'id': interface.qkdi_id,
+                            'enabled': interface.enabled,
+                            'name': interface.name,
+                            'att_point': interface.qkdi_att_point,
+                            'capabilities': interface.qkdi_capabilities
+                        } for interface in device.qkd_interfaces.qkd_interface],
+                        'applications': [{
+                            'app_id': app.app_id,
+                            'app_qos': app.app_qos,
+                            'app_statistics': app.app_statistics,
+                            'backing_qkdl_id': app.backing_qkdl_id,
+                            'client_app_id': app.client_app_id
+                        } for app in device.qkd_applications.qkd_app]
+                    }
+                    topology_info['devices'].append(device_info)
+                context_info['topologies'].append(topology_info)
+            contexts_info.append(context_info)
+        
+        return contexts_info
+    except grpc.RpcError as e:
+        app.logger.error(f"gRPC error: {e}")
+        abort(502, description=f"gRPC error: {e.details()}")
+    except Exception as e:
+        app.logger.error(f"Error retrieving context topology info: {e}")
+        abort(500, description="Internal Server Error")
+
+
+def get_detailed_device_info():
+    try:
+        context_info = get_context_topology_info()
+        detailed_info = []
+        for context in context_info:
+            if context['context_name'] == 'admin':
+                for topology in context['topologies']:
+                    if topology['topology_name'] == 'admin':
+                        for device in topology['devices']:
+                            driver = get_driver_instance(device)
+                            if driver:
+                                detailed_info.append({
+                                    'device_info': device,
+                                    'driver_info': get_device_driver_info(driver)
+                                })
+        return detailed_info
+    except Exception as e:
+        app.logger.error(f"Error retrieving detailed device info: {e}")
+        abort(500, description="Internal Server Error")
+
+
+def get_driver_instance(device):
+    device_uuid = device['device_id']
+    driver_filter_fields = {
+        FilterFieldEnum.DEVICE_TYPE: device['device_type'],
+        FilterFieldEnum.DRIVER: device['drivers'],
+    }
+    connect_rules = {config['key']: config['value'] for config in device['configurations']}
+    
+    address = connect_rules.get('_connect/address', '127.0.0.1')
+    port = int(connect_rules.get('_connect/port', '0'))
+    settings = json.loads(connect_rules.get('_connect/settings', '{}'))
+
+    try:
+        driver = driver_instance_cache.get(
+            device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings)
+        if os.getenv('QKD_API_URL'):  # Assume real QKD system if QKD_API_URL is set
+            if not driver.Connect():
+                raise Exception("Failed to connect to real QKD system")
+        else:
+            driver.Connect()
+        return driver
+    except Exception as e:
+        app.logger.error(f"Failed to get driver instance for device {device_uuid}: {e}")
+        return None
+
+
+
+def get_device_driver_info(driver: _Driver):
+    try:
+        return {
+            'initial_config': driver.GetInitialConfig(),
+            'running_config': driver.GetConfig(),
+            'subscriptions': list(driver.GetState(blocking=False))
+        }
+    except Exception as e:
+        app.logger.error(f"Failed to retrieve driver information: {e}")
+        return {'error': str(e)}
+
+
+@app.route('/qkd/devices', methods=['GET'])
+def retrieve_qkd_devices():
+    try:
+        context_info = get_context_topology_info()
+        return jsonify(context_info), 200
+    except Exception as e:
+        app.logger.error(f"Error retrieving QKD devices: {e}")
+        return jsonify({'error': 'Internal Server Error'}), 500
+
+
+@app.route('/qkd/capabilities', methods=['GET'])
+def get_qkd_capabilities():
+    try:
+        context_info = get_context_topology_info()
+        for context in context_info:
+            if context['context_name'] == 'admin':
+                for topology in context['topologies']:
+                    if topology['topology_name'] == 'admin':
+                        for device in topology['devices']:
+                            driver = get_driver_instance(device)
+                            if driver:
+                                capabilities = driver.get_qkd_capabilities()
+                                return jsonify(capabilities), 200
+        abort(404, description="No capabilities found")
+    except Exception as e:
+        app.logger.error(f"Error retrieving QKD capabilities: {e}")
+        return jsonify({'error': 'Internal Server Error'}), 500
+
+
+@app.route('/qkd/interfaces', methods=['GET'])
+def get_qkd_interfaces():
+    try:
+        context_info = get_context_topology_info()
+        for context in context_info:
+            if context['context_name'] == 'admin':
+                for topology in context['topologies']:
+                    if topology['topology_name'] == 'admin':
+                        for device in topology['devices']:
+                            driver = get_driver_instance(device)
+                            if driver:
+                                interfaces = driver.get_qkd_interfaces()
+                                return jsonify(interfaces), 200
+        abort(404, description="No interfaces found")
+    except Exception as e:
+        app.logger.error(f"Error retrieving QKD interfaces: {e}")
+        return jsonify({'error': 'Internal Server Error'}), 500
+
+
+@app.route('/qkd/links', methods=['GET'])
+def get_qkd_links():
+    try:
+        context_info = get_context_topology_info()
+        for context in context_info:
+            if context['context_name'] == 'admin':
+                for topology in context['topologies']:
+                    if topology['topology_name'] == 'admin':
+                        for device in topology['devices']:
+                            driver = get_driver_instance(device)
+                            if driver:
+                                links = driver.get_qkd_links()
+                                return jsonify(links), 200
+        abort(404, description="No links found")
+    except Exception as e:
+        app.logger.error(f"Error retrieving QKD links: {e}")
+        return jsonify({'error': 'Internal Server Error'}), 500
+
+
+@app.route('/qkd/applications', methods=['GET'])
+def get_qkd_applications():
+    try:
+        context_info = get_context_topology_info()
+        for context in context_info:
+            if context['context_name'] == 'admin':
+                for topology in context['topologies']:
+                    if topology['topology_name'] == 'admin':
+                        for device in topology['devices']:
+                            driver = get_driver_instance(device)
+                            if driver:
+                                applications = driver.get_qkd_applications()
+                                return jsonify(applications), 200
+        abort(404, description="No applications found")
+    except Exception as e:
+        app.logger.error(f"Error retrieving QKD applications: {e}")
+        return jsonify({'error': 'Internal Server Error'}), 500
+
+
+if __name__ == '__main__':
+    app.run(debug=True, host='0.0.0.0', port=5000)
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index 573eb194e693956b94c9c200722ddee1c3e66ea0..a5e7f377113342b98203a23a426540f6188f784e 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -180,7 +180,7 @@ if LOAD_ALL_DEVICE_DRIVERS:
         ]))
 
 if LOAD_ALL_DEVICE_DRIVERS:
-    from .qkd.QKDDriver import QKDDriver # pylint: disable=wrong-import-position
+    from .qkd.QKDDriver2 import QKDDriver # pylint: disable=wrong-import-position
     DRIVERS.append(
         (QKDDriver, [
             {
diff --git a/src/device/service/drivers/qkd/LuxquantaDriver.py b/src/device/service/drivers/qkd/LuxquantaDriver.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1640e1c553f3d21932db3e18fba2ac5447338ed
--- /dev/null
+++ b/src/device/service/drivers/qkd/LuxquantaDriver.py
@@ -0,0 +1,102 @@
+import grpc
+import json
+import requests
+from common.proto.context_pb2 import DeviceId, DeviceOperationalStatusEnum, ConfigRule, ConfigRule_Custom, Empty, ContextId, TopologyId, Uuid, ConfigActionEnum
+from common.proto.context_pb2_grpc import ContextServiceStub
+from common.DeviceTypes import DeviceTypeEnum
+from common.tools.grpc.ConfigRules import update_config_rule_custom
+
+def login_qkd(address, port, username, password):
+    url = f"http://{address}:{port}/login"
+    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+    data = {'username': username, 'password': password}
+    response = requests.post(url, headers=headers, data=data)
+    response.raise_for_status()
+    token = response.json().get('access_token')
+    return token
+
+def fetch_qkd_info(address, port, token, endpoint):
+    url = f"http://{address}:{port}{endpoint}"
+    headers = {'Authorization': f'Bearer {token}', 'accept': 'application/json'}
+    response = requests.get(url, headers=headers)
+    response.raise_for_status()
+    return response.json()
+
+def create_config_file(qkd_devices, filename):
+    json_structure = {
+        "contexts": [
+            {"context_id": {"context_uuid": {"uuid": "admin"}}}
+        ],
+        "topologies": [
+            {"topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}}
+        ],
+        "devices": [],
+        "links": []
+    }
+
+    for device in qkd_devices:
+        device_entry = {
+            "device_id": {"device_uuid": {"uuid": device["uuid"]}},
+            "device_type": "qkd-node",
+            "device_operational_status": DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED,
+            "device_drivers": [12],  # Assuming 12 is the correct driver ID for QKD
+            "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": ConfigActionEnum.CONFIGACTION_SET, "custom": {"resource_key": "_connect/address", "resource_value": device["address"]}},
+                {"action": ConfigActionEnum.CONFIGACTION_SET, "custom": {"resource_key": "_connect/port", "resource_value": str(device["port"])}},
+                {"action": ConfigActionEnum.CONFIGACTION_SET, "custom": {"resource_key": "_connect/settings", "resource_value": json.dumps({"scheme": "http", "token": device["token"]})}}
+            ]}
+        }
+        json_structure["devices"].append(device_entry)
+
+        for interface in device["interfaces"]["qkd_interface"]:
+            endpoint_id = f"{device['address']}:{interface['qkdi_id']}"
+            device_entry["device_endpoints"].append({
+                "device_id": {"device_uuid": {"uuid": device["uuid"]}},
+                "endpoint_uuid": {"uuid": endpoint_id}
+            })
+
+        for link in device["links"]["qkd_links"]:
+            link_entry = {
+                "link_id": {"link_uuid": {"uuid": link["qkdl_id"]}},
+                "link_endpoint_ids": [
+                    {"device_id": {"device_uuid": {"uuid": device["uuid"]}}, "endpoint_uuid": {"uuid": f"{device['address']}:{link['qkdl_id']}"}}
+                    # You need to fetch and add the other endpoint details similarly
+                ]
+            }
+            json_structure["links"].append(link_entry)
+
+    with open(filename, 'w', encoding='utf-8') as f:
+        json.dump(json_structure, f, ensure_ascii=False, indent=4)
+
+def main():
+    qkd_devices = [
+        {
+            "uuid": "real_qkd1",
+            "address": "10.13.13.2",
+            "port": 5100,
+            "username": "admin",
+            "password": "password"
+        }
+        # Add more QKD devices if needed
+    ]
+
+    # Step 1: Authenticate and get the JWT tokens for each QKD device
+    for qkd_device in qkd_devices:
+        qkd_device['token'] = login_qkd(
+            qkd_device['address'], qkd_device['port'], qkd_device['username'], qkd_device['password'])
+
+    # Step 2: Fetch QKD device information
+    for qkd_device in qkd_devices:
+        qkd_device['capabilities'] = fetch_qkd_info(qkd_device['address'], qkd_device['port'], qkd_device['token'], '/restconf/data/etsi-qkd-sdn-node:qkd_node/qkdn_capabilities')
+        qkd_device['interfaces'] = fetch_qkd_info(qkd_device['address'], qkd_device['port'], qkd_device['token'], '/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_interfaces')
+        qkd_device['links'] = fetch_qkd_info(qkd_device['address'], qkd_device['port'], qkd_device['token'], '/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_links')
+        qkd_device['applications'] = fetch_qkd_info(qkd_device['address'], qkd_device['port'], qkd_device['token'], '/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_applications')
+    
+    # Step 3: Create config files for each QKD device
+    config_filename = "qkd_devices_config.json"
+    create_config_file(qkd_devices, config_filename)
+    print(f"Config file created: {config_filename}")
+
+if __name__ == "__main__":
+    main()
diff --git a/src/device/service/drivers/qkd/QKDDriver.py b/src/device/service/drivers/qkd/QKDDriver.py
index b8f7a8ebee93ae568df4dcab13e0ae03642dd46d..b304ec61a83fec11322ee56b94fe6a42183900c4 100644
--- a/src/device/service/drivers/qkd/QKDDriver.py
+++ b/src/device/service/drivers/qkd/QKDDriver.py
@@ -26,6 +26,8 @@ class QKDDriver(_Driver):
         self.__qkd_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port))
         self.__timeout = int(self.settings.get('timeout', 120))
         self.__node_ids = set(self.settings.get('node_ids', []))
+        token = self.settings.get('token')
+        self.__headers = {'Authorization': 'Bearer ' + token}
         self.__initial_data = None
 
     def Connect(self) -> bool:
@@ -34,7 +36,11 @@ class QKDDriver(_Driver):
             if self.__started.is_set(): return True
             r = None
             try:
-                r = requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth)
+                LOGGER.info(f'requests.get("{url}", timeout={self.__timeout}, verify=False, auth={self.__auth}, headers={self.__headers})')
+                r = requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth, headers=self.__headers)
+                LOGGER.info(f'R: {r}')
+                LOGGER.info(f'Text: {r.text}')
+                LOGGER.info(f'Json: {r.json()}')
             except requests.exceptions.Timeout:
                 LOGGER.exception('Timeout connecting {:s}'.format(str(self.__qkd_root)))
                 return False
@@ -67,7 +73,7 @@ class QKDDriver(_Driver):
                 chk_string(str_resource_name, resource_key, allow_empty=False)
                 results.extend(config_getter(
                     self.__qkd_root, resource_key, timeout=self.__timeout, auth=self.__auth,
-                    node_ids=self.__node_ids))
+                    node_ids=self.__node_ids, headers=self.__headers))
         return results
 
 
@@ -97,7 +103,7 @@ class QKDDriver(_Driver):
                         data = create_connectivity_link(
                             self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst, 
                             virt_prev_hop, virt_next_hops, virt_bandwidth,
-                            timeout=self.__timeout, auth=self.__auth
+                            timeout=self.__timeout, auth=self.__auth, headers=self.__headers
                         )
 
                         #data = create_connectivity_link(
diff --git a/src/device/service/drivers/qkd/QKDDriver2.py b/src/device/service/drivers/qkd/QKDDriver2.py
new file mode 100644
index 0000000000000000000000000000000000000000..84d9f411ef42b9af6d56a8142394a81e7dfa0104
--- /dev/null
+++ b/src/device/service/drivers/qkd/QKDDriver2.py
@@ -0,0 +1,228 @@
+import os
+import json
+import logging
+import requests
+import threading
+from requests.auth import HTTPBasicAuth
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.type_checkers.Checkers import chk_string, chk_type
+from device.service.driver_api._Driver import _Driver
+from .Tools2 import config_getter, create_connectivity_link
+
+LOGGER = logging.getLogger(__name__)
+
+DRIVER_NAME = 'qkd'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
+
+
+class QKDDriver(_Driver):
+    def __init__(self, address: str, port: int, **settings) -> None:
+        LOGGER.info(f"Initializing QKDDriver with address={address}, port={port}, settings={settings}")
+        super().__init__(DRIVER_NAME, address, port, **settings)
+        self.__lock = threading.Lock()
+        self.__started = threading.Event()
+        self.__terminate = threading.Event()
+        self.__auth = None
+        self.__headers = {}
+        self.__qkd_root = os.getenv('QKD_API_URL', '{:s}://{:s}:{:d}'.format(settings.get('scheme', 'http'), self.address, int(self.port)))
+        self.__timeout = int(self.settings.get('timeout', 120))
+        self.__node_ids = set(self.settings.get('node_ids', []))
+        self.__initial_data = None
+
+        # Authentication settings
+        self.__username = settings.get('username')
+        self.__password = settings.get('password')
+        self.__use_jwt = settings.get('use_jwt', True)  # Default to True if JWT is required
+        self.__token = settings.get('token')
+
+        if self.__token:
+            self.__headers = {'Authorization': 'Bearer ' + self.__token}
+        elif self.__username and self.__password:
+            self.__auth = HTTPBasicAuth(self.__username, self.__password)
+
+        LOGGER.info(f"QKDDriver initialized with QKD root URL: {self.__qkd_root}")
+
+    def authenticate(self) -> bool:
+        if self.__use_jwt and not self.__token:
+            return self.__authenticate_with_jwt()
+        return True
+
+    def __authenticate_with_jwt(self) -> bool:
+        login_url = f'{self.__qkd_root}/login'
+        payload = {'username': self.__username, 'password': self.__password}
+
+        try:
+            LOGGER.info(f'Attempting to authenticate with JWT at {login_url}')
+            response = requests.post(login_url, data=payload, timeout=self.__timeout)
+            response.raise_for_status()
+            token = response.json().get('access_token')
+            if not token:
+                LOGGER.error('Failed to retrieve access token')
+                return False
+            self.__token = token  # Store the token
+            self.__headers = {'Authorization': f'Bearer {token}'}
+            LOGGER.info('JWT authentication successful')
+            return True
+        except requests.exceptions.RequestException as e:
+            LOGGER.exception(f'JWT authentication failed: {e}')
+            return False
+
+    def Connect(self) -> bool:
+        url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node'
+        with self.__lock:
+            LOGGER.info(f"Starting connection to {url}")
+            if self.__started.is_set():
+                LOGGER.info("Already connected, skipping re-connection.")
+                return True
+
+            try:
+                if not self.__headers and not self.__auth:
+                    LOGGER.info("No headers or auth found, calling authenticate.")
+                    if not self.authenticate():
+                        return False
+
+                LOGGER.info(f'Attempting to connect to {url} with headers {self.__headers} and timeout {self.__timeout}')
+                response = requests.get(url, timeout=self.__timeout, verify=False, headers=self.__headers, auth=self.__auth)
+                LOGGER.info(f'Received response: {response.status_code}, content: {response.text}')
+                response.raise_for_status()
+                self.__initial_data = response.json()
+                self.__started.set()
+                LOGGER.info('Connection successful')
+                return True
+            except requests.exceptions.RequestException as e:
+                LOGGER.error(f'Connection failed: {e}')
+                return False
+
+    def Disconnect(self) -> bool:
+        LOGGER.info("Disconnecting QKDDriver")
+        with self.__lock:
+            self.__terminate.set()
+            LOGGER.info("QKDDriver disconnected successfully")
+            return True
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetInitialConfig(self) -> List[Tuple[str, Any]]:
+        LOGGER.info("Getting initial configuration")
+        with self.__lock:
+            if isinstance(self.__initial_data, dict):
+                initial_config = [('qkd_node', self.__initial_data.get('qkd_node', {}))]
+                LOGGER.info(f"Initial configuration: {initial_config}")
+                return initial_config
+            LOGGER.warning("Initial data is not a dictionary")
+            return []
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+        chk_type('resources', resource_keys, list)
+        LOGGER.info(f"Getting configuration for resource_keys: {resource_keys}")
+        results = []
+        with self.__lock:
+            if not resource_keys:
+                resource_keys = ['capabilities', 'interfaces', 'links', 'endpoints', 'apps']
+            for i, resource_key in enumerate(resource_keys):
+                chk_string(f'resource_key[{i}]', resource_key, allow_empty=False)
+                LOGGER.info(f"Retrieving resource key: {resource_key}")
+                resource_results = config_getter(
+                    self.__qkd_root, resource_key, timeout=self.__timeout, headers=self.__headers, auth=self.__auth,
+                    node_ids=self.__node_ids)
+                results.extend(resource_results)
+                LOGGER.info(f"Resource results for {resource_key}: {resource_results}")
+        LOGGER.info(f"Final configuration results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        LOGGER.info(f"Setting configuration for resources: {resources}")
+        results = []
+        if not resources:
+            LOGGER.warning("No resources provided for SetConfig")
+            return results
+        with self.__lock:
+            for resource_key, resource_value in resources:
+                LOGGER.info(f'Processing resource_key: {resource_key}, resource_value: {resource_value}')
+
+                if resource_key.startswith('/link'):
+                    try:
+                        if not isinstance(resource_value, dict):
+                            raise TypeError(f"Expected dictionary but got {type(resource_value).__name__}")
+
+                        link_uuid = resource_value.get('uuid')
+                        node_id_src = resource_value.get('src_qkdn_id')
+                        interface_id_src = resource_value.get('src_interface_id')
+                        node_id_dst = resource_value.get('dst_qkdn_id')
+                        interface_id_dst = resource_value.get('dst_interface_id')
+                        virt_prev_hop = resource_value.get('virt_prev_hop')
+                        virt_next_hops = resource_value.get('virt_next_hops')
+                        virt_bandwidth = resource_value.get('virt_bandwidth')
+
+                        LOGGER.info(f"Creating connectivity link with UUID: {link_uuid}")
+                        create_connectivity_link(
+                            self.__qkd_root, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst,
+                            virt_prev_hop, virt_next_hops, virt_bandwidth,
+                            headers=self.__headers, timeout=self.__timeout, auth=self.__auth
+                        )
+                        results.append(True)
+                        LOGGER.info(f"Connectivity link {link_uuid} created successfully")
+                    except Exception as e:
+                        LOGGER.exception(f'Unhandled error processing resource_key({resource_key})')
+                        results.append(e)
+                else:
+                    LOGGER.error(f'Invalid resource key detected: {resource_key}')
+                    results.append(ValueError(f'Invalid resource key: {resource_key}'))
+        
+        LOGGER.info(f"SetConfig results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        LOGGER.info(f"Deleting configuration for resources: {resources}")
+        results = []
+        if not resources:
+            LOGGER.warning("No resources provided for DeleteConfig")
+            return results
+        with self.__lock:
+            for resource in resources:
+                LOGGER.info(f'Resource to delete: {resource}')
+                uuid = resource[1].get('uuid')
+                if uuid:
+                    LOGGER.info(f'Resource with UUID {uuid} deleted successfully')
+                    results.append(True)
+                else:
+                    LOGGER.warning(f"UUID not found in resource: {resource}")
+                    results.append(False)
+        LOGGER.info(f"DeleteConfig results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        LOGGER.info(f"Subscribing to state updates: {subscriptions}")
+        results = [True for _ in subscriptions]
+        LOGGER.info(f"Subscription results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        LOGGER.info(f"Unsubscribing from state updates: {subscriptions}")
+        results = [True for _ in subscriptions]
+        LOGGER.info(f"Unsubscription results: {results}")
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetState(self, blocking=False, terminate: Optional[threading.Event] = None) -> Union[dict, list]:
+        LOGGER.info(f"GetState called with blocking={blocking}, terminate={terminate}")
+        url = self.__qkd_root + '/restconf/data/etsi-qkd-sdn-node:qkd_node'
+        try:
+            LOGGER.info(f"Making GET request to {url} to retrieve state")
+            response = requests.get(url, timeout=self.__timeout, verify=False, headers=self.__headers, auth=self.__auth)
+            LOGGER.info(f"Received state response: {response.status_code}, content: {response.text}")
+            response.raise_for_status()
+            state_data = response.json()
+            LOGGER.info(f"State data retrieved: {state_data}")
+            return state_data
+        except requests.exceptions.Timeout:
+            LOGGER.error(f'Timeout getting state from {self.__qkd_root}')
+            return []
+        except Exception as e:
+            LOGGER.error(f'Exception getting state from {self.__qkd_root}: {e}')
+            return []
diff --git a/src/device/service/drivers/qkd/Tools.py b/src/device/service/drivers/qkd/Tools.py
index 38e80ad5075c9078d26ae3ef1c036d5f021d2df3..bfdf3033acd30bb62360a084343e47acd710a631 100644
--- a/src/device/service/drivers/qkd/Tools.py
+++ b/src/device/service/drivers/qkd/Tools.py
@@ -21,7 +21,7 @@ def find_key(resource, key):
 
 def config_getter(
     root_url : str, resource_key : str, auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None,
-    node_ids : Set[str] = set()
+    node_ids : Set[str] = set(), headers={}
 ):
     # getting endpoints
 
@@ -33,7 +33,7 @@ def config_getter(
     try:
         if resource_key in [RESOURCE_ENDPOINTS, RESOURCE_INTERFACES]:
             url += 'qkd_interfaces/'
-            r = requests.get(url, timeout=timeout, verify=False, auth=auth)
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
             interfaces = r.json()['qkd_interfaces']['qkd_interface']
 
             # If it's a physical endpoint
@@ -73,7 +73,7 @@ def config_getter(
         
         elif resource_key in [RESOURCE_LINKS, RESOURCE_NETWORK_INSTANCES]:
             url += 'qkd_links/'
-            r = requests.get(url, timeout=timeout, verify=False, auth=auth)
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
             links = r.json()['qkd_links']['qkd_link']
 
             if resource_key == RESOURCE_LINKS:
@@ -93,7 +93,7 @@ def config_getter(
         
         elif resource_key == RESOURCE_APPS:
             url += 'qkd_applications/'
-            r = requests.get(url, timeout=timeout, verify=False, auth=auth)
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
             apps = r.json()['qkd_applications']['qkd_app']
 
             for app in apps:
@@ -103,13 +103,13 @@ def config_getter(
 
         elif resource_key == RESOURCE_CAPABILITES:
             url += 'qkdn_capabilities/'
-            r = requests.get(url, timeout=timeout, verify=False, auth=auth)
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
             capabilities = r.json()['qkdn_capabilities']
 
             result.append((resource_key, capabilities))
         
         elif resource_key == RESOURCE_NODE:
-            r = requests.get(url, timeout=timeout, verify=False, auth=auth)
+            r = requests.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
             node = r.json()['qkd_node']
 
             result.append((resource_key, node))
@@ -128,7 +128,7 @@ def config_getter(
 def create_connectivity_link(
     root_url, link_uuid, node_id_src, interface_id_src, node_id_dst, interface_id_dst,
     virt_prev_hop = None, virt_next_hops = None, virt_bandwidth = None,
-    auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None
+    auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None, headers={}
 ):
 
     url = root_url + '/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_links/'
@@ -155,5 +155,5 @@ def create_connectivity_link(
 
     data = {'qkd_links': {'qkd_link': [qkd_link]}}
 
-    requests.post(url, json=data)
+    requests.post(url, json=data, headers=headers)
 
diff --git a/src/device/service/drivers/qkd/Tools2.py b/src/device/service/drivers/qkd/Tools2.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea88799f4d2ce2d65952ccb0ae59bc2da3770a58
--- /dev/null
+++ b/src/device/service/drivers/qkd/Tools2.py
@@ -0,0 +1,209 @@
+import json
+import logging
+import requests
+from requests.auth import HTTPBasicAuth
+from typing import Dict, Optional, Set, List, Tuple, Union, Any
+from requests.adapters import HTTPAdapter
+from urllib3.util.retry import Retry
+
+LOGGER = logging.getLogger(__name__)
+
+HTTP_OK_CODES = {
+    200,    # OK
+    201,    # Created
+    202,    # Accepted
+    204,    # No Content
+}
+
+def get_request_session(retries=5, backoff_factor=1.0, status_forcelist=(500, 502, 504)):
+    """
+    Creates a requests session with retries and backoff strategy.
+    """
+    LOGGER.info(f"Creating request session with retries={retries}, backoff_factor={backoff_factor}, status_forcelist={status_forcelist}")
+    session = requests.Session()
+    retry = Retry(
+        total=retries,
+        read=retries,
+        connect=retries,
+        backoff_factor=backoff_factor,
+        status_forcelist=status_forcelist,
+    )
+    adapter = HTTPAdapter(max_retries=retry)
+    session.mount('http://', adapter)
+    session.mount('https://', adapter)
+    LOGGER.info("Request session created successfully")
+    return session
+
+def find_key(resource, key):
+    """
+    Extracts a specific key from a JSON resource.
+    """
+    return json.loads(resource[1])[key]
+
+def verify_endpoint_existence(session, endpoint_uuid, root_url, headers):
+    """
+    Verifies if the given endpoint exists.
+    """
+    url = f"{root_url}/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_interfaces/qkd_interface={endpoint_uuid}"
+    r = session.get(url, headers=headers)
+    if r.status_code == 200 and r.json():
+        return True
+    else:
+        LOGGER.error(f"Endpoint {endpoint_uuid} does not exist or is not accessible")
+        return False
+
+def config_getter(
+    root_url: str, resource_key: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None,
+    node_ids: Set[str] = set(), headers: Dict[str, str] = {}
+) -> List[Tuple[str, Union[Dict[str, Any], Exception]]]:
+    """
+    Fetches configuration data from a QKD node for a specified resource key.
+    Returns a list of tuples containing the resource key and the corresponding data or exception.
+    """
+    url = f"{root_url}/restconf/data/etsi-qkd-sdn-node:qkd_node/"
+    result = []
+    session = get_request_session()
+
+    LOGGER.info(f"Starting config_getter with root_url={root_url}, resource_key={resource_key}, headers={headers}")
+
+    try:
+        if resource_key in ['endpoints', '__endpoints__', 'interfaces']:
+            url += 'qkd_interfaces/'
+            LOGGER.info(f"Making GET request to {url} with headers: {headers}")
+            r = session.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            LOGGER.info(f"Received response: {r.status_code}, content: {r.text}")
+            r.raise_for_status()
+            interfaces = r.json().get('qkd_interfaces', {}).get('qkd_interface', [])
+            if not interfaces:
+                raise KeyError('qkd_interfaces')
+            for interface in interfaces:
+                if resource_key in ['endpoints', '__endpoints__']:
+                    endpoint_uuid = f"{interface['qkdi_att_point'].get('device', 'N/A')}:{interface['qkdi_att_point'].get('port', 'N/A')}"
+                    resource_key_with_uuid = f"/endpoints/endpoint[{endpoint_uuid}]"
+                    interface['uuid'] = endpoint_uuid
+                    result.append((resource_key_with_uuid, interface))
+                else:
+                    interface_uuid = f"{interface['qkdi_att_point'].get('device', 'N/A')}:{interface['qkdi_att_point'].get('port', 'N/A')}"
+                    interface['uuid'] = interface_uuid
+                    interface['name'] = interface_uuid
+                    interface['enabled'] = True
+                    resource_key_with_uuid = f"/interface[{interface['qkdi_id']}]"
+                    result.append((resource_key_with_uuid, interface))
+
+        elif resource_key in ['links', '__links__', '__network_instances__', 'network_instances']:
+            url += 'qkd_links/'
+            LOGGER.info(f"Making GET request to {url} with headers: {headers}")
+            r = session.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            LOGGER.info(f"Received response: {r.status_code}, content: {r.text}")
+            r.raise_for_status()
+            links = r.json().get('qkd_links', {}).get('qkd_link', [])
+            if not links:
+                LOGGER.warning(f"No links found in the response for 'qkd_links'")
+            
+            for link in links:
+                link_type = link.get('qkdl_type', 'Direct')
+
+                if resource_key == 'links':
+                    if link_type == 'Direct':
+                        resource_key_with_uuid = f"/link[{link['qkdl_id']}]"
+                        result.append((resource_key_with_uuid, link))
+                else:
+                    if link_type == 'Virtual':
+                        resource_key_with_uuid = f"/service[{link['qkdl_id']}]"
+                        result.append((resource_key_with_uuid, link))
+
+        elif resource_key in ['apps', '__apps__']:
+            url += 'qkd_applications/'
+            LOGGER.info(f"Making GET request to {url} with headers: {headers}")
+            r = session.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            LOGGER.info(f"Received response: {r.status_code}, content: {r.text}")
+            r.raise_for_status()
+            apps = r.json().get('qkd_applications', {}).get('qkd_app', [])
+            if not apps:
+                raise KeyError('qkd_applications')
+            for app in apps:
+                app_resource_key = f"/app[{app['app_id']}]"
+                result.append((app_resource_key, app))
+
+        elif resource_key in ['capabilities', '__capabilities__']:
+            url += 'qkdn_capabilities/'
+            LOGGER.info(f"Making GET request to {url} with headers: {headers}")
+            r = session.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            LOGGER.info(f"Received response: {r.status_code}, content: {r.text}")
+            r.raise_for_status()
+            capabilities = r.json()
+            result.append((resource_key, capabilities))
+
+        elif resource_key in ['node', '__node__']:
+            LOGGER.info(f"Making GET request to {url} with headers: {headers}")
+            r = session.get(url, timeout=timeout, verify=False, auth=auth, headers=headers)
+            LOGGER.info(f"Received response: {r.status_code}, content: {r.text}")
+            r.raise_for_status()
+            node = r.json().get('qkd_node', {})
+            result.append((resource_key, node))
+
+        else:
+            LOGGER.warning(f"Unknown resource key: {resource_key}")
+            result.append((resource_key, ValueError(f"Unknown resource key: {resource_key}")))
+
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f'Exception retrieving/parsing {resource_key} from {url}: {e}')
+        result.append((resource_key, e))
+
+    LOGGER.info(f"config_getter results for {resource_key}: {result}")
+    return result
+
+def create_connectivity_link(
+    root_url: str, link_uuid: str, node_id_src: str, interface_id_src: str, node_id_dst: str, interface_id_dst: str,
+    virt_prev_hop: Optional[str] = None, virt_next_hops: Optional[List[str]] = None, virt_bandwidth: Optional[int] = None,
+    auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None, headers: Dict[str, str] = {}
+) -> Union[bool, Exception]:
+    """
+    Creates a connectivity link between QKD nodes using the provided parameters.
+    """
+    url = f"{root_url}/restconf/data/etsi-qkd-sdn-node:qkd_node/qkd_links/"
+    session = get_request_session()
+
+    # Verify that endpoints exist before creating the link
+    if not (verify_endpoint_existence(session, interface_id_src, root_url, headers) and
+            verify_endpoint_existence(session, interface_id_dst, root_url, headers)):
+        LOGGER.error(f"Cannot create link {link_uuid} because one or both endpoints do not exist.")
+        return Exception(f"Endpoint verification failed for link {link_uuid}")
+
+    is_virtual = bool(virt_prev_hop or virt_next_hops)
+
+    qkd_link = {
+        'qkdl_id': link_uuid,
+        'qkdl_type': 'etsi-qkd-node-types:' + ('VIRT' if is_virtual else 'PHYS'),
+        'qkdl_local': {
+            'qkdn_id': node_id_src,
+            'qkdi_id': interface_id_src
+        },
+        'qkdl_remote': {
+            'qkdn_id': node_id_dst,
+            'qkdi_id': interface_id_dst
+        }
+    }
+
+    if is_virtual:
+        qkd_link['virt_prev_hop'] = virt_prev_hop
+        qkd_link['virt_next_hop'] = virt_next_hops or []
+        qkd_link['virt_bandwidth'] = virt_bandwidth
+
+        data = {'qkd_links': {'qkd_link': [qkd_link]}}
+
+    LOGGER.info(f"Creating connectivity link with payload: {json.dumps(data)}")
+
+    try:
+        r = session.post(url, json=data, timeout=timeout, verify=False, auth=auth, headers=headers)
+        LOGGER.info(f"Received response for link creation: {r.status_code}, content: {r.text}")
+        r.raise_for_status()
+        if r.status_code in HTTP_OK_CODES:
+            LOGGER.info(f"Link {link_uuid} created successfully.")
+            return True
+        else:
+            LOGGER.error(f"Failed to create link {link_uuid}, status code: {r.status_code}")
+            return False
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Exception creating link {link_uuid} with payload {json.dumps(data)}: {e}")
+        return e
diff --git a/src/device/tests/qkd/integration/test_qkd_integration.py b/src/device/tests/qkd/integration/test_qkd_integration.py
new file mode 100644
index 0000000000000000000000000000000000000000..73477de0add6b99eb8207a494c0e5c6be59fbcd1
--- /dev/null
+++ b/src/device/tests/qkd/integration/test_qkd_integration.py
@@ -0,0 +1,35 @@
+import pytest
+from src.device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+def test_end_to_end_workflow():
+    driver = QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+    assert driver.Connect() is True
+    
+    # Retrieve initial configuration
+    config = driver.GetInitialConfig()
+    assert isinstance(config, dict)
+    assert 'qkd_node' in config
+    
+    # Define the new configuration
+    new_config = {'uuid': 'test', 'device': 'device1', 'port': 'port1'}
+    
+    # Use a valid resource key based on driver implementation
+    valid_resource_key = '/link/valid_resource_key'  # Adjust this key as necessary
+    
+    try:
+        result = driver.SetConfig([(valid_resource_key, new_config)])
+        
+        # Check for ValueErrors in results
+        if any(isinstance(res, ValueError) for res in result):
+            pytest.fail(f"SetConfig failed with error: {next(res for res in result if isinstance(res, ValueError))}")
+
+        # Ensure result is a list of booleans
+        assert isinstance(result, list)
+        assert all(isinstance(res, bool) for res in result)
+        assert all(result)  # Ensure all operations succeeded
+    except Exception as e:
+        pytest.fail(f"SetConfig failed: {e}")
+
+
+
+
diff --git a/src/device/tests/qkd/integration/test_qkd_luxquanta_retrieve_information.py b/src/device/tests/qkd/integration/test_qkd_luxquanta_retrieve_information.py
new file mode 100644
index 0000000000000000000000000000000000000000..9364b8e5e33129e0b13ecad2eb0a9646d5e547a2
--- /dev/null
+++ b/src/device/tests/qkd/integration/test_qkd_luxquanta_retrieve_information.py
@@ -0,0 +1,177 @@
+import pytest
+import json
+import logging
+from src.device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+class SafeJSONEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, Exception):
+            return {'error': str(obj), 'type': type(obj).__name__}
+        return super().default(obj)
+
+# Dictionary to store retrieved information
+retrieved_info = {
+    "config_qkd1": None,
+    "config_qkd2": None,
+    "capabilities_qkd1": None,
+    "capabilities_qkd2": None,
+    "interfaces_qkd1": None,
+    "interfaces_qkd2": None,
+    "links_qkd1": None,
+    "links_qkd2": None,
+    "state_qkd1": None,
+    "state_qkd2": None,
+}
+
+@pytest.fixture
+def driver_qkd1():
+    return QKDDriver(address='10.13.13.2', port=5100, username='admin', password='password', use_jwt=True)
+
+@pytest.fixture
+def driver_qkd2():
+    return QKDDriver(address='10.13.13.3', port=5100, username='admin', password='password', use_jwt=True)
+
+def log_data(label, data):
+    LOGGER.info(f"{label}: {json.dumps(data, indent=2, cls=SafeJSONEncoder)}")
+
+def get_jwt_token(driver):
+    try:
+        return driver._QKDDriver__headers.get('Authorization').split(' ')[1]
+    except (AttributeError, KeyError, TypeError):
+        return None
+
+def save_json_file(filename, data):
+    """Save data to a JSON file."""
+    try:
+        with open(filename, 'w') as f:
+            json.dump(data, f, indent=2)
+        LOGGER.info(f"Successfully saved {filename}")
+    except Exception as e:
+        LOGGER.error(f"Failed to save {filename}: {e}")
+
+def test_retrieve_and_create_descriptor(driver_qkd1, driver_qkd2):
+    # Connect to both QKD nodes
+    assert driver_qkd1.Connect()
+    assert driver_qkd2.Connect()
+
+    # Use the same JWT token for all requests
+    jwt_token = get_jwt_token(driver_qkd1)
+    assert jwt_token, "Failed to retrieve JWT token from QKD1"
+
+    driver_qkd2._QKDDriver__headers['Authorization'] = f'Bearer {jwt_token}'
+
+    # Retrieve initial configs
+    config_qkd1 = driver_qkd1.GetInitialConfig()
+    retrieved_info['config_qkd1'] = config_qkd1
+    log_data("QKD1 Initial Config", config_qkd1)
+    assert config_qkd1, "Failed to retrieve initial configuration for QKD1"
+
+    config_qkd2 = driver_qkd2.GetInitialConfig()
+    retrieved_info['config_qkd2'] = config_qkd2
+    log_data("QKD2 Initial Config", config_qkd2)
+    assert config_qkd2, "Failed to retrieve initial configuration for QKD2"
+
+    # Retrieve capabilities
+    capabilities_qkd1 = driver_qkd1.GetConfig(['capabilities'])
+    retrieved_info['capabilities_qkd1'] = capabilities_qkd1
+    log_data("QKD1 Capabilities", capabilities_qkd1)
+    assert capabilities_qkd1, "Failed to retrieve capabilities for QKD1"
+
+    capabilities_qkd2 = driver_qkd2.GetConfig(['capabilities'])
+    retrieved_info['capabilities_qkd2'] = capabilities_qkd2
+    log_data("QKD2 Capabilities", capabilities_qkd2)
+    assert capabilities_qkd2, "Failed to retrieve capabilities for QKD2"
+
+    # Retrieve interfaces
+    interfaces_qkd1 = driver_qkd1.GetConfig(['interfaces'])
+    retrieved_info['interfaces_qkd1'] = interfaces_qkd1
+    log_data("QKD1 Interfaces", interfaces_qkd1)
+    assert interfaces_qkd1, "Failed to retrieve interfaces for QKD1"
+
+    interfaces_qkd2 = driver_qkd2.GetConfig(['interfaces'])
+    retrieved_info['interfaces_qkd2'] = interfaces_qkd2
+    log_data("QKD2 Interfaces", interfaces_qkd2)
+    assert interfaces_qkd2, "Failed to retrieve interfaces for QKD2"
+
+    # Retrieve links
+    links_qkd1 = driver_qkd1.GetConfig(['links'])
+    retrieved_info['links_qkd1'] = links_qkd1
+    log_data("QKD1 Links", links_qkd1)
+    assert links_qkd1, "Failed to retrieve links for QKD1"
+
+    links_qkd2 = driver_qkd2.GetConfig(['links'])
+    retrieved_info['links_qkd2'] = links_qkd2
+    log_data("QKD2 Links", links_qkd2)
+    assert links_qkd2, "Failed to retrieve links for QKD2"
+
+    # Retrieve states
+    state_qkd1 = driver_qkd1.GetState()
+    retrieved_info['state_qkd1'] = state_qkd1
+    log_data("QKD1 Current State", state_qkd1)
+    assert state_qkd1, "Failed to retrieve state for QKD1"
+
+    state_qkd2 = driver_qkd2.GetState()
+    retrieved_info['state_qkd2'] = state_qkd2
+    log_data("QKD2 Current State", state_qkd2)
+    assert state_qkd2, "Failed to retrieve state for QKD2"
+
+    # Save retrieved information after all data retrieval
+    save_json_file('retrieved_info.json', retrieved_info)
+
+    # Dynamically create the descriptor
+    descriptor = {
+        "contexts": [
+            {"context_id": {"context_uuid": {"uuid": "admin"}}}
+        ],
+        "topologies": [
+            {"topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}}
+        ],
+        "devices": [],
+        "links": []
+    }
+
+    # Dynamically add device information
+    for config, token, interfaces, device_name, address, port in [
+        (config_qkd1, jwt_token, interfaces_qkd1, "QKD1", "10.13.13.2", "5100"),
+        (config_qkd2, jwt_token, interfaces_qkd2, "QKD2", "10.13.13.3", "5100")
+    ]:
+        device_info = {
+            "device_id": {"device_uuid": {"uuid": device_name}},
+            "device_type": "qkd-node",
+            "device_operational_status": 0,
+            "device_drivers": [12],  # This could be dynamically determined if needed
+            "device_endpoints": [],
+            "device_config": {
+                "config_rules": [
+                    {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": address}},
+                    {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": port}},
+                    {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                        "scheme": "http",
+                        "token": token if token else "N/A"
+                    }}}
+                ]
+            }
+        }
+
+        descriptor['devices'].append(device_info)
+
+    # Dynamically create and add links based on retrieved links data
+    if links_qkd1 and links_qkd2:
+        for link_data in links_qkd1:
+            link_uuid = link_data[1].get('qkdl_id')
+            link_entry = {
+                "link_id": {"link_uuid": {"uuid": f"QKD1/{address}:{port}==QKD2/{links_qkd2[0][1]['qkdi_status']}/{port}"}},
+                "link_endpoint_ids": [
+                    {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": f"{address}:{port}"}},
+                    {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": f"{address}:{port}"}}
+                ]
+            }
+            descriptor['links'].append(link_entry)
+
+    # Save the dynamically created descriptor
+    save_json_file('descriptor.json', descriptor)
+    log_data("Created Descriptor", descriptor)
\ No newline at end of file
diff --git a/src/device/tests/qkd/mock_qkd_nodes/YangValidator.py b/src/device/tests/qkd/mock_qkd_nodes/YangValidator.py
new file mode 100644
index 0000000000000000000000000000000000000000..2056d5df64a1d841fc74c1be73aa6408051ab738
--- /dev/null
+++ b/src/device/tests/qkd/mock_qkd_nodes/YangValidator.py
@@ -0,0 +1,42 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import libyang, os
+from typing import Dict, Optional
+
+YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
+
+class YangValidator:
+    def __init__(self, main_module : str, dependency_modules : [str]) -> None:
+        self._yang_context = libyang.Context(YANG_DIR)
+
+        self._yang_module = self._yang_context.load_module(main_module)
+        mods = [self._yang_context.load_module(mod) for mod in dependency_modules] + [self._yang_module]
+
+        for mod in mods:
+            mod.feature_enable_all()
+        
+
+
+    def parse_to_dict(self, message : Dict) -> Dict:
+        dnode : Optional[libyang.DNode] = self._yang_module.parse_data_dict(
+            message, validate_present=True, validate=True, strict=True
+        )
+        if dnode is None: raise Exception('Unable to parse Message({:s})'.format(str(message)))
+        message = dnode.print_dict()
+        dnode.free()
+        return message
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
diff --git a/src/device/tests/qkd/mock_qkd_nodes/mock.py b/src/device/tests/qkd/mock_qkd_nodes/mock.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5d884197f12beb20618744534e144929f3686b7
--- /dev/null
+++ b/src/device/tests/qkd/mock_qkd_nodes/mock.py
@@ -0,0 +1,355 @@
+import os
+
+from flask import Flask, request
+from YangValidator import YangValidator
+
+app = Flask(__name__)
+
+
+yang_validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
+
+
+nodes = {
+    '10.211.36.220:11111': {'node': {
+            'qkdn_id': '00000001-0000-0000-0000-000000000000',
+        },
+        'qkdn_capabilities': {
+        },
+        'qkd_applications': {
+            'qkd_app': [
+                {
+                    'app_id': '00000001-0001-0000-0000-000000000000',           
+                    'client_app_id': [],
+                    'app_statistics': {
+                        'statistics': []
+                    },
+                    'app_qos': {
+                    },
+                    'backing_qkdl_id': []
+                }
+            ]
+        },
+        'qkd_interfaces': {
+            'qkd_interface': [
+                {
+                    'qkdi_id': '100',
+                    'qkdi_att_point': {
+                    },
+                    'qkdi_capabilities': {
+                    }
+                },
+                {
+                    'qkdi_id': '101',
+                    'qkdi_att_point': {
+                        'device':'10.211.36.220',
+                        'port':'1001'
+                    },
+                    'qkdi_capabilities': {
+                    }
+                }
+            ]
+        },
+        'qkd_links': {
+            'qkd_link': [
+
+            ]
+        }
+    },
+
+    '10.211.36.220:22222': {'node': {
+            'qkdn_id': '00000002-0000-0000-0000-000000000000',
+        },
+        'qkdn_capabilities': {
+        },
+        'qkd_applications': {
+            'qkd_app': [
+                {
+                    'app_id': '00000002-0001-0000-0000-000000000000',           
+                    'client_app_id': [],
+                    'app_statistics': {
+                        'statistics': []
+                    },
+                    'app_qos': {
+                    },
+                    'backing_qkdl_id': []
+                }
+            ]
+        },
+        'qkd_interfaces': {
+            'qkd_interface': [
+                {
+                    'qkdi_id': '200',
+                    'qkdi_att_point': {
+                    },
+                    'qkdi_capabilities': {
+                    }
+                },
+                {
+                    'qkdi_id': '201',
+                    'qkdi_att_point': {
+                        'device':'10.211.36.220',
+                        'port':'2001'
+                    },
+                    'qkdi_capabilities': {
+                    }
+                },
+                {
+                    'qkdi_id': '202',
+                    'qkdi_att_point': {
+                        'device':'10.211.36.220',
+                        'port':'2002'
+                    },
+                    'qkdi_capabilities': {
+                    }
+                }
+            ]
+        },
+        'qkd_links': {
+            'qkd_link': [
+
+            ] 
+        }
+    },
+
+    '10.211.36.220:33333': {'node': {
+            'qkdn_id': '00000003-0000-0000-0000-000000000000',
+        },
+        'qkdn_capabilities': {
+        },
+        'qkd_applications': {
+            'qkd_app': [
+                {
+                    'app_id': '00000003-0001-0000-0000-000000000000',           
+                    'client_app_id': [],
+                    'app_statistics': {
+                        'statistics': []
+                    },
+                    'app_qos': {
+                    },
+                    'backing_qkdl_id': []
+                }
+            ]
+        },
+        'qkd_interfaces': {
+            'qkd_interface': [
+                {
+                    'qkdi_id': '300',
+                    'qkdi_att_point': {
+                    },
+                    'qkdi_capabilities': {
+                    }
+                },
+                {
+                    'qkdi_id': '301',
+                    'qkdi_att_point': {
+                        'device':'10.211.36.220',
+                        'port':'3001'
+                    },
+                    'qkdi_capabilities': {
+                    }
+                }
+            ]
+        },
+        'qkd_links': {
+            'qkd_link': [
+
+            ]
+        }
+    }
+}
+
+
+def get_side_effect(url):
+
+    steps = url.lstrip('https://').lstrip('http://').rstrip('/')
+    ip_port, _, _, header, *steps = steps.split('/')
+
+    header_splitted = header.split(':')
+
+    module = header_splitted[0]
+    assert(module == 'etsi-qkd-sdn-node')
+
+    tree = {'qkd_node': nodes[ip_port]['node'].copy()}
+
+    if len(header_splitted) == 1 or not header_splitted[1]:
+        value = nodes[ip_port].copy()
+        value.pop('node')
+        tree['qkd_node'].update(value)
+
+        return tree, tree
+    
+    root = header_splitted[1]
+    assert(root == 'qkd_node')
+
+    if not steps:
+        return tree, tree
+
+
+    endpoint, *steps = steps
+    
+    value = nodes[ip_port][endpoint]
+
+    if not steps:
+        return_value = {endpoint:value}
+        tree['qkd_node'].update(return_value)
+
+        return return_value, tree
+
+    
+
+    '''
+    element, *steps = steps
+
+    container, key = element.split('=')
+    
+    # value = value[container][key]
+
+    if not steps:
+        return_value['qkd_node'][endpoint] = [value]
+        return return_value
+
+    '''
+    raise Exception('Url too long')
+
+        
+
+def edit(from_dict, to_dict, create):
+    for key, value in from_dict.items():
+        if isinstance(value, dict):
+            if key not in to_dict and create:
+                to_dict[key] = {}
+            edit(from_dict[key], to_dict[key], create)
+        elif isinstance(value, list):
+            to_dict[key].extend(value)
+        else:
+            to_dict[key] = value
+
+
+
+def edit_side_effect(url, json, create):
+    steps = url.lstrip('https://').lstrip('http://').rstrip('/')
+    ip_port, _, _, header, *steps = steps.split('/')
+
+    module, root = header.split(':')
+
+    assert(module == 'etsi-qkd-sdn-node')
+    assert(root == 'qkd_node')
+
+    if not steps:
+        edit(json, nodes[ip_port]['node'])
+        return
+
+    endpoint, *steps = steps
+
+    if not steps:
+        edit(json[endpoint], nodes[ip_port][endpoint], create)
+        return
+
+
+    '''
+    element, *steps = steps
+
+    container, key = element.split('=')
+
+    if not steps:
+        if key not in nodes[ip_port][endpoint][container] and create:
+            nodes[ip_port][endpoint][container][key] = {}
+
+        edit(json, nodes[ip_port][endpoint][container][key], create)
+        return 0
+    '''
+    
+    raise Exception('Url too long')
+
+
+
+
+
+
+@app.get('/', defaults={'path': ''})
+@app.get("/<string:path>")
+@app.get('/<path:path>')
+def get(path):
+    msg, msg_validate = get_side_effect(request.base_url)
+    print(msg_validate)
+    yang_validator.parse_to_dict(msg_validate)
+    return msg
+
+
+@app.post('/', defaults={'path': ''})
+@app.post("/<string:path>")
+@app.post('/<path:path>')
+def post(path):
+    success = True
+    reason = ''
+    try:
+        edit_side_effect(request.base_url, request.json, True)
+    except Exception as e:
+        reason = str(e)
+        success = False
+    return {'success': success, 'reason': reason}
+    
+
+
+@app.route('/', defaults={'path': ''}, methods=['PUT', 'PATCH'])
+@app.route("/<string:path>", methods=['PUT', 'PATCH'])
+@app.route('/<path:path>', methods=['PUT', 'PATCH'])
+def patch(path):
+    success = True
+    reason = ''
+    try:
+        edit_side_effect(request.base_url, request.json, False)
+    except Exception as e:
+        reason = str(e)
+        success = False
+    return {'success': success, 'reason': reason}
+
+
+
+
+
+# import json
+# from mock import requests
+# import pyangbind.lib.pybindJSON as enc
+# from pyangbind.lib.serialise import pybindJSONDecoder as dec
+# from yang.sbi.qkd.templates.etsi_qkd_sdn_node import etsi_qkd_sdn_node
+
+# module = etsi_qkd_sdn_node()
+# url = 'https://1.1.1.1/restconf/data/etsi-qkd-sdn-node:'
+
+# # Get node all info
+# z = requests.get(url).json()
+# var = dec.load_json(z, None, None, obj=module)
+# print(enc.dumps(var))
+
+
+# Reset module variable because it is already filled
+# module = etsi_qkd_sdn_node()
+
+# # Get node basic info
+# node = module.qkd_node
+# z = requests.get(url + 'qkd_node').json()
+# var = dec.load_json(z, None, None, obj=node)
+# print(enc.dumps(var))
+
+
+# # Get all apps
+# apps = node.qkd_applications
+# z = requests.get(url + 'qkd_node/qkd_applications').json()
+# var = dec.load_json(z, None, None, obj=apps)
+# print(enc.dumps(var))
+
+# # Edit app 0
+# app = apps.qkd_app['00000000-0001-0000-0000-000000000000']
+# app.client_app_id = 'id_0'
+# requests.put(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000000', json=json.loads(enc.dumps(app)))
+
+# # Create app 1
+# app = apps.qkd_app.add('00000000-0001-0000-0000-000000000001')
+# requests.post(url + 'qkd_node/qkd_applications/qkd_app=00000000-0001-0000-0000-000000000001', json=json.loads(enc.dumps(app)))
+
+# # Get all apps
+# apps = node.qkd_applications
+# z = requests.get(url + 'qkd_node/qkd_applications').json()
+# var = dec.load_json(z, None, None, obj=apps)
+# print(enc.dumps(var))
diff --git a/src/device/tests/qkd/mock_qkd_nodes/start.sh b/src/device/tests/qkd/mock_qkd_nodes/start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..cf8ee753384e28fded56561251adc55faec43a4f
--- /dev/null
+++ b/src/device/tests/qkd/mock_qkd_nodes/start.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+cd "$(dirname "$0")"
+
+
+#!/bin/bash
+killbg() {
+        for p in "${pids[@]}" ; do
+                kill "$p";
+        done
+}
+trap killbg EXIT
+pids=()
+flask --app mock run --host 0.0.0.0 --port 11111 & 
+pids+=($!)
+flask --app mock run --host 0.0.0.0 --port 22222 & 
+pids+=($!)
+flask --app mock run --host 0.0.0.0 --port 33333
diff --git a/src/device/tests/qkd/mock_qkd_nodes/yang/etsi-qkd-node-types.yang b/src/device/tests/qkd/mock_qkd_nodes/yang/etsi-qkd-node-types.yang
new file mode 100644
index 0000000000000000000000000000000000000000..04bbd8a875445a9bcf19266f21b792439bf9005c
--- /dev/null
+++ b/src/device/tests/qkd/mock_qkd_nodes/yang/etsi-qkd-node-types.yang
@@ -0,0 +1,326 @@
+/* Copyright 2022 ETSI
+Licensed under the BSD-3 Clause (https://forge.etsi.org/legal-matters) */
+
+module etsi-qkd-node-types {
+
+  yang-version "1";
+
+  namespace "urn:etsi:qkd:yang:etsi-qkd-node-types";
+
+  prefix "etsi-qkdn-types";
+
+  organization "ETSI ISG QKD";
+
+  contact
+    "https://www.etsi.org/committee/qkd
+    vicente@fi.upm.es";
+
+  description
+    "This module contains the base types created for 
+    the software-defined QKD node information models
+    specified in ETSI GS QKD 015 V2.1.1
+    - QKD-TECHNOLOGY-TYPES
+    - QKDN-STATUS-TYPES
+    - QKD-LINK-TYPES
+    - QKD-ROLE-TYPES
+    - QKD-APP-TYPES
+    - Wavelength
+    ";
+
+  revision "2022-01-30" {
+    description
+      "Refinement of the YANG model to make it compatible with the ETSI ISG QKD 018. Minor fixes.";
+  }
+  
+  revision "2020-09-30" {
+    description
+      "First definition based on initial requirement analysis.";
+  }
+
+  identity QKD-TECHNOLOGY-TYPES {
+  	description "Quantum Key Distribution System base technology types.";
+  }
+
+  identity CV-QKD {
+    base QKD-TECHNOLOGY-TYPES;
+    description "Continuous Variable base technology.";
+  }
+
+  identity DV-QKD {
+    base QKD-TECHNOLOGY-TYPES;
+    description "Discrete Variable base technology.";
+  }
+
+  identity DV-QKD-COW {
+    base QKD-TECHNOLOGY-TYPES;
+    description "COW base technology.";
+  }
+
+  identity DV-QKD-2Ws {
+    base QKD-TECHNOLOGY-TYPES;
+    description "2-Ways base technology.";
+  }
+  
+  typedef qkd-technology-types {
+    type identityref {
+      base QKD-TECHNOLOGY-TYPES;
+    }
+    description "This type represents the base technology types of the SD-QKD system.";
+  }
+  
+  identity QKDN-STATUS-TYPES {
+    description "Base identity used to identify the SD-QKD node status.";
+  }
+  
+  identity NEW {
+    base QKDN-STATUS-TYPES;
+    description "The QKD node is installed.";
+  }
+  
+  identity OPERATING {
+    base QKDN-STATUS-TYPES;
+    description "The QKD node is up.";
+  }
+  
+  identity DOWN {
+    base QKDN-STATUS-TYPES;
+    description "The QKD node is not working as expected.";
+  }
+  
+  identity FAILURE {
+    base QKDN-STATUS-TYPES;
+    description "The QKD node cannot be accessed by SDN controller with communication failure.";
+  }
+  
+  identity OUT {
+    base QKDN-STATUS-TYPES;
+    description "The QKD node is switched off and uninstalled.";
+  }
+  
+  typedef qkdn-status-types {
+    type identityref {
+      base QKDN-STATUS-TYPES;
+    }
+    description "This type represents the status of the SD-QKD node.";
+  }
+
+  identity QKD-LINK-TYPES {
+  	description "QKD key association link types.";
+  }
+
+  identity VIRT {
+    base QKD-LINK-TYPES;
+    description "Virtual Link.";
+  }
+
+  identity PHYS {
+    base QKD-LINK-TYPES;
+    description "Physical Link.";
+  }
+  
+  typedef qkd-link-types {
+    type identityref {
+      base QKD-LINK-TYPES;
+    }
+    description "This type represents the key association link type between two SD-QKD nodes.";
+  }
+
+  identity QKD-ROLE-TYPES {
+  	description "QKD Role Type.";
+  }
+
+  identity TRANSMITTER {
+    base QKD-ROLE-TYPES;
+    description "QKD module working as transmitter.";
+  }
+
+  identity RECEIVER {
+    base QKD-ROLE-TYPES;
+    description "QKD module working as receiver.";
+  }
+
+  identity TRANSCEIVER {
+    base QKD-ROLE-TYPES;
+    description "QKD System that can work as a transmitter or receiver.";
+  }
+  
+  typedef qkd-role-types {
+    type identityref {
+      base QKD-ROLE-TYPES;
+    }
+    description "This type represents the working mode of a SD-QKD module.";
+  }
+
+  identity QKD-APP-TYPES {
+  	description "Application types.";
+  }
+
+  identity CLIENT {
+    base QKD-APP-TYPES;
+    description "Application working as client.";
+  }
+
+  identity INTERNAL {
+    base QKD-APP-TYPES;
+    description "Internal QKD node application.";
+  }
+  
+  typedef qkd-app-types {
+    type identityref {
+      base QKD-APP-TYPES;
+    }
+    description "This type represents the application class consuming key from SD-QKD nodes.";
+  }
+
+  identity PHYS-PERF-TYPES {
+    description "Physical performance types.";
+  }
+
+  identity QBER {
+    base PHYS-PERF-TYPES;
+    description "Quantum Bit Error Rate.";
+  }
+
+  identity SNR {
+    base PHYS-PERF-TYPES;
+    description "Signal to Noise Ratio.";
+  }
+  
+  typedef phys-perf-types {
+    type identityref {
+      base PHYS-PERF-TYPES;
+    }
+    description "This type represents physical performance types.";
+  }
+
+  identity LINK-STATUS-TYPES {
+    description "Status of the key association QKD link (physical and virtual).";
+  }
+
+  identity ACTIVE {
+    base LINK-STATUS-TYPES;
+    description "Link actively generating keys.";
+  }
+
+  identity PASSIVE {
+    base LINK-STATUS-TYPES;
+    description "No key generation on key association QKD link but a pool of keys
+    are still available.";
+  }
+
+  identity PENDING {
+    base LINK-STATUS-TYPES;
+    description "Waiting for activation and no keys are available.";
+  }
+
+  identity OFF {
+    base LINK-STATUS-TYPES;
+    description "No key generation and no keys are available.";
+  }
+  
+  typedef link-status-types {
+    type identityref {
+      base LINK-STATUS-TYPES;
+    }
+    description "This type represents the status of a key association QKD link, both physical and virtual.";
+  }
+
+  ///
+  
+  identity IFACE-STATUS-TYPES {
+  	description "Interface Status.";
+  }
+
+  identity ENABLED {
+    base IFACE-STATUS-TYPES;
+    description "The interfaces is up.";
+  }
+
+  identity DISABLED {
+    base IFACE-STATUS-TYPES;
+    description "The interfaces is down.";
+  }
+
+  identity FAILED {
+    base IFACE-STATUS-TYPES;
+    description "The interfaces has failed.";
+  }
+  
+  typedef iface-status-types {
+    type identityref {
+      base IFACE-STATUS-TYPES;
+    }
+    description "This type represents the status of a interface between a SD-QKD node and a SD-QKD module.";
+  }
+
+  identity APP-STATUS-TYPES {
+  	description "Application types.";
+  }
+
+  identity ON {
+    base APP-STATUS-TYPES;
+    description "The application is on.";
+  }
+
+  identity DISCONNECTED {
+    base APP-STATUS-TYPES;
+    description "The application is disconnected.";
+  }
+
+  identity OUT-OF-TIME {
+    base APP-STATUS-TYPES;
+    description "The application is out of time.";
+  }
+
+  identity ZOMBIE {
+    base APP-STATUS-TYPES;
+    description "The application is in a zombie state.";
+  }
+  
+  typedef app-status-types {
+    type identityref {
+      base APP-STATUS-TYPES;
+    }
+    description "This type represents the status of an application  consuming key from SD-QKD nodes.";
+  }
+
+  identity SEVERITY-TYPES {
+  	description "Error/Failure severity levels.";
+  }
+
+  identity MAJOR {
+    base SEVERITY-TYPES;
+    description "Major error/failure.";
+  }
+
+  identity MINOR {
+    base SEVERITY-TYPES;
+    description "Minor error/failure.";
+  }
+  
+  typedef severity-types {
+    type identityref {
+      base SEVERITY-TYPES;
+    }
+    description "This type represents the Error/Failure severity levels.";
+  }
+
+  typedef wavelength {
+  		type string {
+                pattern "([1-9][0-9]{0,3})";
+            }
+            description
+                "A WDM channel number (starting at 1). For example: 20";
+  }
+
+  //Pattern from "A Yang Data Model for WSON Optical Networks".
+  typedef wavelength-range-type {
+            type string {
+                pattern "([1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?" +
+                        "(,[1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?)*)";
+            }
+            description
+                "A list of WDM channel numbers (starting at 1)
+                 in ascending order. For example: 1,12-20,40,50-80";
+  }
+}
diff --git a/src/device/tests/qkd/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang b/src/device/tests/qkd/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang
new file mode 100644
index 0000000000000000000000000000000000000000..d07004cdc5b558adc5a9c0b6acb32adac0d7cc11
--- /dev/null
+++ b/src/device/tests/qkd/mock_qkd_nodes/yang/etsi-qkd-sdn-node.yang
@@ -0,0 +1,941 @@
+/* Copyright 2022 ETSI
+Licensed under the BSD-3 Clause (https://forge.etsi.org/legal-matters) */
+
+module etsi-qkd-sdn-node {
+
+  yang-version "1";
+
+  namespace "urn:etsi:qkd:yang:etsi-qkd-node";
+
+  prefix "etsi-qkdn";
+  
+  import ietf-yang-types { prefix "yang"; }
+  import ietf-inet-types { prefix "inet"; }
+  import etsi-qkd-node-types { prefix "etsi-qkdn-types"; }
+
+  // meta
+  organization "ETSI ISG QKD";
+
+  contact
+    "https://www.etsi.org/committee/qkd
+    vicente@fi.upm.es";
+
+  description
+    "This module contains the groupings and containers composing 
+    the software-defined QKD node information models
+    specified in ETSI GS QKD 015 V2.1.1";
+
+  revision "2022-01-30" {
+    description
+      "Refinement of the YANG model to make it compatible with the ETSI ISG QKD 018. Minor fixes.";
+    reference
+      "ETSI GS QKD 015 V2.1.1 (2022-01)";
+  }
+
+  revision "2020-09-30" {
+    description
+      "First definition based on initial requirement analysis.";
+    reference
+      "ETSI GS QKD 015 V1.1.1 (2021-03)";
+  }
+  
+  grouping qkdn_id {
+    description "Grouping of qkdn_id leaf.";
+    
+    leaf qkdn_id {
+      type yang:uuid;
+      mandatory true;
+      description
+        "This value reflects the unique ID of the SD-QKD node.";
+    }
+  }
+  
+  grouping qkdn_version {
+    description "Grouping of qkdn_version leaf.";
+    
+    leaf qkdn_version {
+      type string;
+      description "Hardware or software version of the SD-QKD node.";
+    }
+  }
+
+  grouping qkdn_location_id {
+    description "Grouping of qkdn_location_id leaf.";
+    
+    leaf qkdn_location_id {
+      type string;
+      default "";
+      description
+        "This value enables the location of the secure
+        area that contains the SD-QKD node to be specified.";
+    }
+  }
+
+  grouping qkdn_status {
+    description "Grouping of qkdn_status leaf.";
+    
+    leaf qkdn_status {
+      type etsi-qkdn-types:qkdn-status-types;
+      config false;
+      description "Status of the SD-QKD node.";
+    }
+  }
+
+  grouping qkdn_capabilities {
+    description "Grouping of the capabilities of the SD-QKD node.";
+    
+    container qkdn_capabilities {
+      description "Capabilities of the SD-QKD node.";
+
+      leaf link_stats_support {
+        type boolean;
+        default true;
+        description
+          "If true, this node exposes link-related statistics (secure key 
+          generation rate-SKR, link consumption, status, QBER).";
+      }
+
+      leaf application_stats_support {
+        type boolean;
+        default true;
+        description "If true, this node exposes application related 
+          statistics (application consumption, alerts).";
+      }
+
+      leaf key_relay_mode_enable {
+        type boolean;
+        default true;
+        description "If true, this node supports key relay (multi-hop) mode services.";
+      }
+    }
+  }
+  
+  grouping app_id {
+    description "Grouping of app_id leaf.";
+    
+    leaf app_id {
+      type yang:uuid;
+      description
+        "Unique ID that identifies a QKD application consisting of a set of entities 
+        that are allowed to receive keys shared with each other from the SD-QKD nodes 
+        they connect to. This value is similar to a key ID or key handle.";
+    }
+  }
+  
+  grouping app_basic {
+    description "Grouping of app's basic parameters.";
+    
+    uses app_id;
+        
+    leaf app_status {
+      type etsi-qkdn-types:app-status-types;
+      config false;
+      description "Status of the application.";
+    }
+  }
+  
+  grouping app_priority {
+    description "Grouping of app_priority leaf.";
+    
+    leaf app_priority {
+      type uint32;
+      default 0;
+      description "Priority of the association/application 
+        might be defined by the user but usually 
+        handled by a network administrator.";
+    }
+  }
+  
+  grouping app_details {
+    description "Grouping of app's details parameters.";
+    
+    leaf app_type {
+      type etsi-qkdn-types:qkd-app-types;
+      description "Type of the registered application. These
+        values, defined within the types module, can be client
+        (if an external applications requesting keys)
+        or internal (application is defined to maintain
+        the QKD - e.g. multi-hop, authentication or
+        other encryption operations).";
+    }
+    
+    leaf server_app_id {
+      type inet:uri;
+      description "ID that identifies the entity that initiated the 
+      creation of the QKD application to receive keys shared with one 
+      or more specified target entity identified by client_app_id.  
+      It is a client in the interface to the SD-QKD node and the name 
+      server_app_id reflects that it requested the QKD application to 
+      be initiated.";
+    }
+
+    leaf-list client_app_id {
+      type inet:uri;
+      description "List of IDs that identifies the one or more 
+      entities that are allowed to receive keys from SD-QKD 
+      node(s) under the QKD application in addition to the 
+      initiating entity identified by server_app_id.";
+    }
+
+    uses app_priority;
+  }
+  
+  grouping local_qkdn_id {
+    description "Grouping of local_qkdn_id leaf.";
+    
+    leaf local_qkdn_id {
+      type yang:uuid;
+      description "Unique ID of the local SD-QKD node which
+        is providing QKD keys to the local application.";
+    }
+  }
+  
+  grouping app_time {
+    description "Grouping of app's time parameters.";
+    
+    leaf creation_time {
+      type yang:date-and-time;
+      config false;
+      description "Date and time of the service creation.";
+    }
+
+    leaf expiration_time {
+      type yang:date-and-time;
+      description "Date and time of the service expiration.";
+    }
+  }
+  
+  grouping app_statistics {
+    description "Grouping of app's statistic parameters.";
+    
+    container app_statistics {
+      description "Statistical information relating to a specific statistic period of time.";
+
+      list statistics {
+        key "end_time";
+        config false;
+        description "List of statistics.";
+
+        leaf end_time {
+          type yang:date-and-time;
+          config false;
+          description "End time for the statistic period.";
+        }
+
+        leaf start_time {
+          type yang:date-and-time;
+          config false;
+          description "Start time for the statistic period.";
+        }
+
+        leaf consumed_bits {
+          type uint32;
+          config false;
+          description "Consumed secret key amount (in bits) for a statistics collection period of time.";
+        }
+      }
+    }
+  }
+  
+  grouping app_qos {
+    description "Grouping of app's basic qos parameters.";
+    
+    container app_qos {
+      description "Requested Quality of Service.";
+      
+      leaf max_bandwidth {
+        type uint32;
+        description "Maximum bandwidth (in bits per second) allowed for 
+        this specific application. Exceeding this value will raise an 
+        error from the local key store to the appl. This value might 
+        be internally configured (or by an admin) with a default value.";
+      }
+
+      leaf min_bandwidth {
+        type uint32;
+        description "This value is an optional QoS parameter which 
+          enables to require a minimum key rate (in bits per second) 
+          for the application.";
+      }
+
+      leaf jitter {
+        type uint32;
+        description "This value allows to specify the maximum jitter 
+          (in msec) to be provided by the key delivery API for 
+          applications requiring fast rekeying. This value can be 
+          coordinated with the other QoS to provide a wide enough 
+          QoS definition.";
+      }
+
+      leaf ttl {
+        type uint32;
+        description "This value is used to specify the maximum time 
+          (in seconds) that a key could be kept in the key store for 
+          a given application without being used.";
+      }
+    }
+  }
+  
+  grouping augmented_app_qos {
+    description "Grouping of app's detailed qos parameters.";
+    
+    uses app_qos {
+      augment app_qos {
+        description "Augmentation of app's basic parameters with app's detailed qos parameters.";
+
+        leaf clients_shared_path_enable {
+          type boolean;
+          default false;
+          description "If true, multiple clients for this 
+            application might share keys to reduce service 
+            impact (consumption).";
+        }
+
+        leaf clients_shared_keys_required {
+          type boolean;
+          default false;
+          description "If true, multiple clients for this application
+            might share keys to reduce service impact (consumption).";
+        }
+      }
+    }
+  }
+
+  grouping qkd_applications {
+    description "Grouping of the list of applications container.";
+    
+    container qkd_applications {
+      description "List of applications container.";
+
+      list qkd_app {
+        key "app_id";
+        description "List of applications that are currently registered
+          in the SD-QKD node. Any entity consuming QKD-derived keys (either 
+          for internal or external purposes) is considered an application.";
+   
+        uses app_basic;
+    
+        uses app_details;
+
+        uses app_time;
+        
+        uses app_statistics;
+        
+        uses augmented_app_qos;
+
+        leaf-list backing_qkdl_id {
+          type yang:uuid;
+          description "Unique ID of the key association link which is 
+            providing QKD keys to these applications.";
+        }
+
+        uses local_qkdn_id;
+
+        leaf remote_qkdn_id {
+          type yang:uuid;
+          description "Unique ID of the remote SD-QKD node which 
+            is providing QKD keys to the remote application. 
+            While unknown, the local SD-QKD will not be able to 
+            provide keys to the local application.";
+        }
+      }
+    }
+  }
+
+  grouping qkdi_status {
+    description "Grouping of qkdi_status leaf.";
+    
+    leaf qkdi_status {
+      type etsi-qkdn-types:iface-status-types;
+      config false;
+      description "Status of a QKD interface of the SD-QKD node.";
+    }
+  }
+  
+  grouping qkdi_model {
+    description "Grouping of qkdi_model leaf.";
+    
+    leaf qkdi_model {
+      type string;
+      description "Device model (vendor/device).";
+    }
+  }
+  
+  grouping qkdi_type {
+    description "Grouping of qkdi_type leaf.";
+    
+    leaf qkdi_type {
+      type etsi-qkdn-types:qkd-technology-types;
+      description "Interface type (QKD  technology).";
+    }
+  }
+  
+  grouping qkdi_att_point {
+    description "Grouping of the interface attachment points to an optical switch.";
+    
+    container qkdi_att_point {
+      description "Interface attachment point to an optical switch.";
+
+      leaf device {
+        type string;
+        description "Unique ID of the optical switch (or
+        passive component) to which the interface is connected.";
+      }
+
+      leaf port {
+        type uint32;
+        description "Port ID of the device to which the interface
+        is connected.";
+      }
+    }
+  }
+  
+  grouping qkdi_id {
+    description "Grouping of qkdi_id leaf.";
+    
+    leaf qkdi_id {
+      type uint32;
+      description "Interface id. It is described as a locally unique number, 
+      which is globally unique when combined with the SD-QKD node ID.";
+    }
+  }
+  
+  grouping qkd_interface_item {
+    description "Grouping of the interface parameters.";
+  
+    uses qkdi_id;
+
+    uses qkdi_model;
+
+    uses qkdi_type;
+
+    uses qkdi_att_point;
+
+    container qkdi_capabilities {
+      description "Capabilities of the QKD system (interface).";
+
+      leaf role_support {
+        type etsi-qkdn-types:qkd-role-types;
+        description "QKD node support for key relay mode services.";
+      }
+
+      leaf wavelength_range {
+        type etsi-qkdn-types:wavelength-range-type;
+        description "Range of supported wavelengths (nm) (multiple
+          if it contains a tunable laser).";
+      }
+
+      leaf max_absorption {
+        type decimal64 {
+          fraction-digits 3;
+        }
+        description "Maximum absorption supported (in dB).";
+      }
+    }
+  }
+  
+  grouping qkd_interfaces {
+    description "Grouping of the list of interfaces.";
+  
+    container qkd_interfaces {
+      description "List of interfaces container.";
+
+      list qkd_interface {
+        key "qkdi_id";
+        description "List of physical QKD modules in a secure location,
+          abstracted as interfaces of the SD-QKD node.";
+
+        uses qkd_interface_item;
+        
+        uses qkdi_status;
+        
+      }
+    }
+  }
+  
+  grouping qkdl_id {
+    description "Grouping of qkdl_id leaf.";
+    
+    leaf qkdl_id {
+      type yang:uuid;
+      description "Unique ID of the QKD link (key association).";
+    }
+  }
+  
+  grouping qkdl_status {
+    description "Grouping of qkdl_status leaf.";
+    
+    leaf qkdl_status {
+      type etsi-qkdn-types:link-status-types;
+      description "Status of the QKD key association link.";
+    }
+  }
+
+  grouping common_performance {
+    description "Grouping of common performance parameters.";
+    
+    leaf expected_consumption {
+      type uint32;
+      config false;
+      description "Sum of all the application's bandwidth (in bits per 
+        second) on this particular key association link.";
+    }
+    
+    leaf skr {
+      type uint32;
+      config false;
+      description "Secret key rate generation (in bits per second) 
+        of the key association link.";
+    }
+
+    leaf eskr {
+      type uint32;
+      config false;
+      description "Effective secret key rate (in bits per second) generation 
+        of the key association link available after internal consumption.";
+    }
+  }
+
+  grouping physical_link_perf {
+    description "Grouping of the list of physical performance parameters.";
+    
+    list phys_perf {
+      key "perf_type";
+      config false;
+      description "List of physical performance parameters.";
+
+      leaf perf_type {
+        type etsi-qkdn-types:phys-perf-types;
+        config false;
+        description "Type of the physical performance value to be
+          exposed to the controller.";
+      }
+
+      leaf value {
+        type decimal64 {
+          fraction-digits 3;
+        }
+        config false;
+        description "Numerical value for the performance parameter 
+          type specified above.";
+      }
+    }
+  }
+
+  grouping virtual_link_spec {
+    description "Grouping of the virtual link's parameters.";
+    
+    leaf virt_prev_hop {
+      type yang:uuid;
+      description "Previous hop in a multi-hop/virtual key
+        association link config.";
+    }
+
+    leaf-list virt_next_hop {
+      type yang:uuid;
+      description "Next hop(s) in a multihop/virtual key 
+        association link config. Defined as a list for multicast 
+        over shared sub-paths.";
+    }
+
+    leaf virt_bandwidth {
+      type uint32;
+      description "Required bandwidth (in bits per second) for that key association link. 
+        Used to reserve bandwidth from the physical QKD links to support the virtual key 
+        association link as an internal application.";
+    }
+  }
+
+  grouping physical_link_spec {
+    description "Grouping of the physical link's parameters.";
+    
+    leaf phys_channel_att {
+      type decimal64 {
+        fraction-digits 3;
+      }
+      description "Expected attenuation on the quantum channel (in dB) 
+        between the Source/qkd_node and Destination/qkd_node.";
+      
+    }
+            
+    leaf phys_wavelength {
+      type etsi-qkdn-types:wavelength;    
+      description "Wavelength (in nm) to be used for the quantum channel. 
+        If the interface is not tunable, this configuration could be bypassed";
+    }
+
+    leaf phys_qkd_role {
+      type etsi-qkdn-types:qkd-role-types;
+      description "Transmitter/receiver mode for the QKD module. 
+        If there is no multi-role support, this could be ignored.";
+    }
+  }
+
+  grouping qkd_links {
+    description "Grouping of the list of links.";
+    
+    container qkd_links {
+      description "List of links container";
+      
+      list qkd_link {
+        key "qkdl_id";
+        description "List of (key association) links to other SD-QKD nodes in the network.
+          The links can be physical (direct quantum channel) or virtual multi-hop 
+          connection doing key-relay through several nodes.";
+
+        uses qkdl_id;
+        
+        uses qkdl_status;
+
+        leaf qkdl_enable {
+          type boolean;
+          default true;
+          description "This value allows to enable of disable the key generation 
+            process for a given link.";
+
+        }
+
+        container qkdl_local {
+          description "Source (local) node of the SD-QKD link.";
+
+          leaf qkdn_id {
+            type yang:uuid;
+            description "Unique ID of the local SD-QKD node.";
+          }
+
+          leaf qkdi_id {
+            type uint32;
+            description "Interface used to create the key association link.";
+          }
+        }
+
+        container qkdl_remote {
+          description "Destination (remote) unique SD-QKD node.";
+
+          leaf qkdn_id {
+            type yang:uuid;
+            description "Unique ID of the remote SD-QKD node. This value is
+              provided by the SDN controller when the key association link 
+              request arrives.";
+          }
+
+          leaf qkdi_id {
+            type uint32;
+            description "Interface used to create the link.";
+          }
+        }
+
+        leaf qkdl_type {
+          type etsi-qkdn-types:qkd-link-types;
+          description "Key Association Link type: Virtual (multi-hop) or Direct.";
+        }
+
+        leaf-list qkdl_applications {
+          type yang:uuid;
+          description "Applications which are consuming keys from
+           this key association link.";
+        }
+
+        uses virtual_link_spec {
+          when "qkdl_type = 'etsi-qkd-node-types:VIRT'" {
+            description "Virtual key association link specific configuration.";
+          }
+        }
+
+        uses physical_link_spec {
+          when "qkdl_type = 'etsi-qkd-node-types:PHYS'" {
+            description "Physical key association link specific configuration.";
+          }
+        }
+
+        container qkdl_performance {
+          description "Container of link's performace parameters.";
+
+          uses common_performance;
+
+          uses physical_link_perf {
+            when "../qkdl_type = 'PHYS'" {
+              description "Performance of the specific physical link.";
+            }
+          }
+        }
+      }
+    }
+  }
+
+  container qkd_node {
+    description
+      "Top module describing a software-defined QKD node (SD-QKD node).";
+
+    uses qkdn_id;
+    
+    uses qkdn_status;
+    
+    uses qkdn_version;
+
+    uses qkdn_location_id;
+
+    uses qkdn_capabilities;
+    
+    uses qkd_applications;
+
+    uses qkd_interfaces;
+
+    uses qkd_links;
+  }
+  
+  grouping message {
+    description "Grouping of message leaf.";
+    
+    leaf message {
+      type string;
+      description "Placeholder for the message.";
+    }
+  }
+
+  grouping severity {
+    description "Grouping of severity leaf.";
+    
+    leaf severity {
+      type etsi-qkdn-types:severity-types;
+      description "Placeholder for the severity.";
+    }
+  }
+  
+  grouping reason {
+    description "Grouping of reason leaf.";
+    
+    leaf reason {
+      type string;
+      description "Auxiliary parameter to include additional
+        information about the reason for link failure.";
+    }
+  }
+
+  notification sdqkdn_application_new {
+    description "Defined for the controller to detect new applications 
+      requesting keys from a QKD node. This maps with the workflow shown 
+      in clause 5.2 'QKD Application Registration'. Parameters such as 
+      client and server app IDs, local QKD node identifier, priority and 
+      QoS are sent in the notification.";
+    
+    container qkd_application {
+      description "'sdqkdn_application_new' notification's qkd_application parameters.";
+    
+      uses app_details;
+
+      uses local_qkdn_id;
+     
+      uses augmented_app_qos;
+      
+    }
+  }
+
+  notification sdqkdn_application_qos_update {
+    description "Notification that includes information about priority or 
+      QoS changes on an existing and already registered application.";
+      
+    container qkd_application {
+      description "'sdqkdn_application_qos_update' notification's qkd_application parameters.";
+    
+      uses app_id;
+     
+      uses augmented_app_qos;
+
+      uses app_priority;
+      
+    }
+  }
+
+  notification sdqkdn_application_disconnected {
+    description "Includes the application identifier to inform that the 
+      application is no longer registered and active in the QKD node.";
+      
+    container qkd_application {
+      description "'sdqkdn_application_disconnected' notification's qkd_application parameters.";
+    
+      uses app_id;
+      
+    }
+  }
+
+  notification sdqkdn_interface_new {
+    description "Includes all the information about the new QKD system 
+      installed in the secure location of a given QKD node.";
+    
+    container qkd_interface {
+      description "'sdqkdn_interface_new' notification's qkd_interface parameters.";
+    
+      uses qkd_interface_item;
+      
+    }
+  }
+
+  notification sdqkdn_interface_down {
+    description "Identifies an interface within a QKD node which is not 
+      working as expected, allowing additional information to be included 
+      in a 'reason' string field.";
+    
+    container qkd_interface {
+      description "'sdqkdn_interface_down' notification's qkd_interface parameters.";
+      
+      uses qkdi_id;
+
+      uses reason;
+      
+    }
+  }
+
+  notification sdqkdn_interface_out {
+    description "Contains the ID of an interface which is switch off and 
+      uninstall from a QKD node. This information can be gathered from this 
+      notification or from regular polling from the controller's side.";
+    
+    container qkd_interface {
+      description "'sdqkdn_interface_out' notification's qkd_interface parameters.";
+      
+      uses qkdi_id;
+      
+    }
+  }
+
+  notification sdqkdn_link_down {
+    description "As in the interface down event, this notification contains
+      the identifier of a given link which has gone down unexpectedly. 
+      In addition, further information can be sent in the 'reason' field.";
+    
+    container qkd_link {
+      description "'sdqkdn_link_down' notification's qkd_link parameters.";
+
+      uses qkdl_id;
+
+      uses reason;
+      
+    }
+  }
+
+  notification sdqkdn_link_perf_update {
+    description "This notification allows to inform of any mayor 
+      modification in the performance of an active link. The identifier 
+      of the link is sent together with the performance parameters of the link.";
+
+    container qkd_link {
+      description "'sdqkdn_link_perf_update' notification's qkd_link parameters.";
+
+      uses qkdl_id;
+
+      container performance {
+      description "'sdqkdn_link_perf_update' notification's performance parameters.";
+
+        uses common_performance;
+
+        uses physical_link_perf;
+  
+      }   
+    }
+  }
+
+  notification sdqkdn_link_overloaded {
+    description "This notification is sent when the link cannot cope with the 
+      demand. The link identifier is sent with the expected consumption and 
+      general performance parameters.";
+    
+    container qkd_link {
+      description "'sdqkdn_link_overloaded' notification's qkd_link parameters.";
+
+      uses qkdl_id;
+
+      container performance {
+      description "'sdqkdn_link_overloaded' notification's performance parameters.";
+
+        uses common_performance;
+  
+      }   
+    }
+  }
+
+  notification alarm {
+    description "'alarm' notification.";
+
+    container link {
+      description "'alarm' notification's link parameters.";
+
+      uses qkdl_id;
+
+      uses qkdl_status;  
+
+      uses message;
+        
+      uses severity;
+        
+    }
+
+    container interface {
+      description "'alarm' notification's interface parameters.";
+
+      uses qkdi_id;
+        
+      uses qkdi_status;
+
+      uses message;
+        
+      uses severity;
+        
+    }
+
+    container application {
+      description "'alarm' notification's application parameters.";
+
+      uses app_basic;
+
+      uses message;
+        
+      uses severity;
+        
+    }
+
+  }
+
+  notification event {
+    description "'event' notification.";
+
+    container link {
+      description "'alarm' notification's link parameters.";
+      
+      uses qkdl_id;
+
+      uses qkdl_status;    
+
+      uses message;
+        
+      uses severity;
+        
+    }
+
+    container interface {
+      description "'alarm' notification's interface parameters.";
+
+      uses qkdi_id;
+        
+      uses qkdi_status;
+
+      uses message;
+        
+      uses severity;
+        
+    }
+
+    container application {
+      description "'alarm' notification's application parameters.";
+
+      uses app_basic;
+
+      uses message;
+        
+      uses severity;
+        
+    }
+
+  }
+
+}
diff --git a/src/device/tests/qkd/mock_qkd_nodes/yang/ietf-inet-types.yang b/src/device/tests/qkd/mock_qkd_nodes/yang/ietf-inet-types.yang
new file mode 100644
index 0000000000000000000000000000000000000000..eacefb6363de1beb543567a0fa705571b7dc57a2
--- /dev/null
+++ b/src/device/tests/qkd/mock_qkd_nodes/yang/ietf-inet-types.yang
@@ -0,0 +1,458 @@
+module ietf-inet-types {
+
+  namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+  prefix "inet";
+
+  organization
+   "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+  contact
+   "WG Web:   <http://tools.ietf.org/wg/netmod/>
+    WG List:  <mailto:netmod@ietf.org>
+
+    WG Chair: David Kessens
+              <mailto:david.kessens@nsn.com>
+
+    WG Chair: Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>
+
+    Editor:   Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>";
+
+  description
+   "This module contains a collection of generally useful derived
+    YANG data types for Internet addresses and related things.
+
+    Copyright (c) 2013 IETF Trust and the persons identified as
+    authors of the code.  All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, is permitted pursuant to, and subject
+    to the license terms contained in, the Simplified BSD License
+    set forth in Section 4.c of the IETF Trust's Legal Provisions
+    Relating to IETF Documents
+    (http://trustee.ietf.org/license-info).
+
+    This version of this YANG module is part of RFC 6991; see
+    the RFC itself for full legal notices.";
+
+  revision 2013-07-15 {
+    description
+     "This revision adds the following new data types:
+      - ip-address-no-zone
+      - ipv4-address-no-zone
+      - ipv6-address-no-zone";
+    reference
+     "RFC 6991: Common YANG Data Types";
+  }
+
+  revision 2010-09-24 {
+    description
+     "Initial revision.";
+    reference
+     "RFC 6021: Common YANG Data Types";
+  }
+
+  /*** collection of types related to protocol fields ***/
+
+  typedef ip-version {
+    type enumeration {
+      enum unknown {
+        value "0";
+        description
+         "An unknown or unspecified version of the Internet
+          protocol.";
+      }
+      enum ipv4 {
+        value "1";
+        description
+         "The IPv4 protocol as defined in RFC 791.";
+      }
+      enum ipv6 {
+        value "2";
+        description
+         "The IPv6 protocol as defined in RFC 2460.";
+      }
+    }
+    description
+     "This value represents the version of the IP protocol.
+
+      In the value set and its semantics, this type is equivalent
+      to the InetVersion textual convention of the SMIv2.";
+    reference
+     "RFC  791: Internet Protocol
+      RFC 2460: Internet Protocol, Version 6 (IPv6) Specification
+      RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  typedef dscp {
+    type uint8 {
+      range "0..63";
+    }
+    description
+     "The dscp type represents a Differentiated Services Code Point
+      that may be used for marking packets in a traffic stream.
+      In the value set and its semantics, this type is equivalent
+      to the Dscp textual convention of the SMIv2.";
+    reference
+     "RFC 3289: Management Information Base for the Differentiated
+                Services Architecture
+      RFC 2474: Definition of the Differentiated Services Field
+                (DS Field) in the IPv4 and IPv6 Headers
+      RFC 2780: IANA Allocation Guidelines For Values In
+                the Internet Protocol and Related Headers";
+  }
+
+  typedef ipv6-flow-label {
+    type uint32 {
+      range "0..1048575";
+    }
+    description
+     "The ipv6-flow-label type represents the flow identifier or Flow
+      Label in an IPv6 packet header that may be used to
+      discriminate traffic flows.
+
+      In the value set and its semantics, this type is equivalent
+      to the IPv6FlowLabel textual convention of the SMIv2.";
+    reference
+     "RFC 3595: Textual Conventions for IPv6 Flow Label
+      RFC 2460: Internet Protocol, Version 6 (IPv6) Specification";
+  }
+
+  typedef port-number {
+    type uint16 {
+      range "0..65535";
+    }
+    description
+     "The port-number type represents a 16-bit port number of an
+      Internet transport-layer protocol such as UDP, TCP, DCCP, or
+      SCTP.  Port numbers are assigned by IANA.  A current list of
+      all assignments is available from <http://www.iana.org/>.
+
+      Note that the port number value zero is reserved by IANA.  In
+      situations where the value zero does not make sense, it can
+      be excluded by subtyping the port-number type.
+      In the value set and its semantics, this type is equivalent
+      to the InetPortNumber textual convention of the SMIv2.";
+    reference
+     "RFC  768: User Datagram Protocol
+      RFC  793: Transmission Control Protocol
+      RFC 4960: Stream Control Transmission Protocol
+      RFC 4340: Datagram Congestion Control Protocol (DCCP)
+      RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  /*** collection of types related to autonomous systems ***/
+
+  typedef as-number {
+    type uint32;
+    description
+     "The as-number type represents autonomous system numbers
+      which identify an Autonomous System (AS).  An AS is a set
+      of routers under a single technical administration, using
+      an interior gateway protocol and common metrics to route
+      packets within the AS, and using an exterior gateway
+      protocol to route packets to other ASes.  IANA maintains
+      the AS number space and has delegated large parts to the
+      regional registries.
+
+      Autonomous system numbers were originally limited to 16
+      bits.  BGP extensions have enlarged the autonomous system
+      number space to 32 bits.  This type therefore uses an uint32
+      base type without a range restriction in order to support
+      a larger autonomous system number space.
+
+      In the value set and its semantics, this type is equivalent
+      to the InetAutonomousSystemNumber textual convention of
+      the SMIv2.";
+    reference
+     "RFC 1930: Guidelines for creation, selection, and registration
+                of an Autonomous System (AS)
+      RFC 4271: A Border Gateway Protocol 4 (BGP-4)
+      RFC 4001: Textual Conventions for Internet Network Addresses
+      RFC 6793: BGP Support for Four-Octet Autonomous System (AS)
+                Number Space";
+  }
+
+  /*** collection of types related to IP addresses and hostnames ***/
+
+  typedef ip-address {
+    type union {
+      type inet:ipv4-address;
+      type inet:ipv6-address;
+    }
+    description
+     "The ip-address type represents an IP address and is IP
+      version neutral.  The format of the textual representation
+      implies the IP version.  This type supports scoped addresses
+      by allowing zone identifiers in the address format.";
+    reference
+     "RFC 4007: IPv6 Scoped Address Architecture";
+  }
+
+  typedef ipv4-address {
+    type string {
+      pattern
+        '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+      +  '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+      + '(%[\p{N}\p{L}]+)?';
+    }
+    description
+      "The ipv4-address type represents an IPv4 address in
+       dotted-quad notation.  The IPv4 address may include a zone
+       index, separated by a % sign.
+
+       The zone index is used to disambiguate identical address
+       values.  For link-local addresses, the zone index will
+       typically be the interface index number or the name of an
+       interface.  If the zone index is not present, the default
+       zone of the device will be used.
+
+       The canonical format for the zone index is the numerical
+       format";
+  }
+
+  typedef ipv6-address {
+    type string {
+      pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+            + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+            + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+            + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+            + '(%[\p{N}\p{L}]+)?';
+      pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+            + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+            + '(%.+)?';
+    }
+    description
+     "The ipv6-address type represents an IPv6 address in full,
+      mixed, shortened, and shortened-mixed notation.  The IPv6
+      address may include a zone index, separated by a % sign.
+
+      The zone index is used to disambiguate identical address
+      values.  For link-local addresses, the zone index will
+      typically be the interface index number or the name of an
+      interface.  If the zone index is not present, the default
+      zone of the device will be used.
+
+      The canonical format of IPv6 addresses uses the textual
+      representation defined in Section 4 of RFC 5952.  The
+      canonical format for the zone index is the numerical
+      format as described in Section 11.2 of RFC 4007.";
+    reference
+     "RFC 4291: IP Version 6 Addressing Architecture
+      RFC 4007: IPv6 Scoped Address Architecture
+      RFC 5952: A Recommendation for IPv6 Address Text
+                Representation";
+  }
+
+  typedef ip-address-no-zone {
+    type union {
+      type inet:ipv4-address-no-zone;
+      type inet:ipv6-address-no-zone;
+    }
+    description
+     "The ip-address-no-zone type represents an IP address and is
+      IP version neutral.  The format of the textual representation
+      implies the IP version.  This type does not support scoped
+      addresses since it does not allow zone identifiers in the
+      address format.";
+    reference
+     "RFC 4007: IPv6 Scoped Address Architecture";
+  }
+
+  typedef ipv4-address-no-zone {
+    type inet:ipv4-address {
+      pattern '[0-9\.]*';
+    }
+    description
+      "An IPv4 address without a zone index.  This type, derived from
+       ipv4-address, may be used in situations where the zone is
+       known from the context and hence no zone index is needed.";
+  }
+
+  typedef ipv6-address-no-zone {
+    type inet:ipv6-address {
+      pattern '[0-9a-fA-F:\.]*';
+    }
+    description
+      "An IPv6 address without a zone index.  This type, derived from
+       ipv6-address, may be used in situations where the zone is
+       known from the context and hence no zone index is needed.";
+    reference
+     "RFC 4291: IP Version 6 Addressing Architecture
+      RFC 4007: IPv6 Scoped Address Architecture
+      RFC 5952: A Recommendation for IPv6 Address Text
+                Representation";
+  }
+
+  typedef ip-prefix {
+    type union {
+      type inet:ipv4-prefix;
+      type inet:ipv6-prefix;
+    }
+    description
+     "The ip-prefix type represents an IP prefix and is IP
+      version neutral.  The format of the textual representations
+      implies the IP version.";
+  }
+
+  typedef ipv4-prefix {
+    type string {
+      pattern
+         '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+       +  '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+       + '/(([0-9])|([1-2][0-9])|(3[0-2]))';
+    }
+    description
+     "The ipv4-prefix type represents an IPv4 address prefix.
+      The prefix length is given by the number following the
+      slash character and must be less than or equal to 32.
+
+      A prefix length value of n corresponds to an IP address
+      mask that has n contiguous 1-bits from the most
+      significant bit (MSB) and all other bits set to 0.
+
+      The canonical format of an IPv4 prefix has all bits of
+      the IPv4 address set to zero that are not part of the
+      IPv4 prefix.";
+  }
+
+  typedef ipv6-prefix {
+    type string {
+      pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+            + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+            + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+            + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+            + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))';
+      pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+            + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+            + '(/.+)';
+    }
+
+    description
+     "The ipv6-prefix type represents an IPv6 address prefix.
+      The prefix length is given by the number following the
+      slash character and must be less than or equal to 128.
+
+      A prefix length value of n corresponds to an IP address
+      mask that has n contiguous 1-bits from the most
+      significant bit (MSB) and all other bits set to 0.
+
+      The IPv6 address should have all bits that do not belong
+      to the prefix set to zero.
+
+      The canonical format of an IPv6 prefix has all bits of
+      the IPv6 address set to zero that are not part of the
+      IPv6 prefix.  Furthermore, the IPv6 address is represented
+      as defined in Section 4 of RFC 5952.";
+    reference
+     "RFC 5952: A Recommendation for IPv6 Address Text
+                Representation";
+  }
+
+  /*** collection of domain name and URI types ***/
+
+  typedef domain-name {
+    type string {
+      pattern
+        '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*'
+      + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)'
+      + '|\.';
+      length "1..253";
+    }
+    description
+     "The domain-name type represents a DNS domain name.  The
+      name SHOULD be fully qualified whenever possible.
+
+      Internet domain names are only loosely specified.  Section
+      3.5 of RFC 1034 recommends a syntax (modified in Section
+      2.1 of RFC 1123).  The pattern above is intended to allow
+      for current practice in domain name use, and some possible
+      future expansion.  It is designed to hold various types of
+      domain names, including names used for A or AAAA records
+      (host names) and other records, such as SRV records.  Note
+      that Internet host names have a stricter syntax (described
+      in RFC 952) than the DNS recommendations in RFCs 1034 and
+      1123, and that systems that want to store host names in
+      schema nodes using the domain-name type are recommended to
+      adhere to this stricter standard to ensure interoperability.
+
+      The encoding of DNS names in the DNS protocol is limited
+      to 255 characters.  Since the encoding consists of labels
+      prefixed by a length bytes and there is a trailing NULL
+      byte, only 253 characters can appear in the textual dotted
+      notation.
+
+      The description clause of schema nodes using the domain-name
+      type MUST describe when and how these names are resolved to
+      IP addresses.  Note that the resolution of a domain-name value
+      may require to query multiple DNS records (e.g., A for IPv4
+      and AAAA for IPv6).  The order of the resolution process and
+      which DNS record takes precedence can either be defined
+      explicitly or may depend on the configuration of the
+      resolver.
+
+      Domain-name values use the US-ASCII encoding.  Their canonical
+      format uses lowercase US-ASCII characters.  Internationalized
+      domain names MUST be A-labels as per RFC 5890.";
+    reference
+     "RFC  952: DoD Internet Host Table Specification
+      RFC 1034: Domain Names - Concepts and Facilities
+      RFC 1123: Requirements for Internet Hosts -- Application
+                and Support
+      RFC 2782: A DNS RR for specifying the location of services
+                (DNS SRV)
+      RFC 5890: Internationalized Domain Names in Applications
+                (IDNA): Definitions and Document Framework";
+  }
+
+  typedef host {
+    type union {
+      type inet:ip-address;
+      type inet:domain-name;
+    }
+    description
+     "The host type represents either an IP address or a DNS
+      domain name.";
+  }
+
+  typedef uri {
+    type string;
+    description
+     "The uri type represents a Uniform Resource Identifier
+      (URI) as defined by STD 66.
+
+      Objects using the uri type MUST be in US-ASCII encoding,
+      and MUST be normalized as described by RFC 3986 Sections
+      6.2.1, 6.2.2.1, and 6.2.2.2.  All unnecessary
+      percent-encoding is removed, and all case-insensitive
+      characters are set to lowercase except for hexadecimal
+      digits, which are normalized to uppercase as described in
+      Section 6.2.2.1.
+
+      The purpose of this normalization is to help provide
+      unique URIs.  Note that this normalization is not
+      sufficient to provide uniqueness.  Two URIs that are
+      textually distinct after this normalization may still be
+      equivalent.
+
+      Objects using the uri type may restrict the schemes that
+      they permit.  For example, 'data:' and 'urn:' schemes
+      might not be appropriate.
+
+      A zero-length URI is not a valid URI.  This can be used to
+      express 'URI absent' where required.
+
+      In the value set and its semantics, this type is equivalent
+      to the Uri SMIv2 textual convention defined in RFC 5017.";
+    reference
+     "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
+      RFC 3305: Report from the Joint W3C/IETF URI Planning Interest
+                Group: Uniform Resource Identifiers (URIs), URLs,
+                and Uniform Resource Names (URNs): Clarifications
+                and Recommendations
+      RFC 5017: MIB Textual Conventions for Uniform Resource
+                Identifiers (URIs)";
+  }
+
+}
diff --git a/src/device/tests/qkd/mock_qkd_nodes/yang/ietf-yang-types.yang b/src/device/tests/qkd/mock_qkd_nodes/yang/ietf-yang-types.yang
new file mode 100644
index 0000000000000000000000000000000000000000..ee58fa3ab0042120d5607b8713d21fa0ba845895
--- /dev/null
+++ b/src/device/tests/qkd/mock_qkd_nodes/yang/ietf-yang-types.yang
@@ -0,0 +1,474 @@
+module ietf-yang-types {
+
+  namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types";
+  prefix "yang";
+
+  organization
+   "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+  contact
+   "WG Web:   <http://tools.ietf.org/wg/netmod/>
+    WG List:  <mailto:netmod@ietf.org>
+
+    WG Chair: David Kessens
+              <mailto:david.kessens@nsn.com>
+
+    WG Chair: Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>
+
+    Editor:   Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>";
+
+  description
+   "This module contains a collection of generally useful derived
+    YANG data types.
+
+    Copyright (c) 2013 IETF Trust and the persons identified as
+    authors of the code.  All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, is permitted pursuant to, and subject
+    to the license terms contained in, the Simplified BSD License
+    set forth in Section 4.c of the IETF Trust's Legal Provisions
+    Relating to IETF Documents
+    (http://trustee.ietf.org/license-info).
+
+    This version of this YANG module is part of RFC 6991; see
+    the RFC itself for full legal notices.";
+
+  revision 2013-07-15 {
+    description
+     "This revision adds the following new data types:
+      - yang-identifier
+      - hex-string
+      - uuid
+      - dotted-quad";
+    reference
+     "RFC 6991: Common YANG Data Types";
+  }
+
+  revision 2010-09-24 {
+    description
+     "Initial revision.";
+    reference
+     "RFC 6021: Common YANG Data Types";
+  }
+
+  /*** collection of counter and gauge types ***/
+
+  typedef counter32 {
+    type uint32;
+    description
+     "The counter32 type represents a non-negative integer
+      that monotonically increases until it reaches a
+      maximum value of 2^32-1 (4294967295 decimal), when it
+      wraps around and starts increasing again from zero.
+
+      Counters have no defined 'initial' value, and thus, a
+      single value of a counter has (in general) no information
+      content.  Discontinuities in the monotonically increasing
+      value normally occur at re-initialization of the
+      management system, and at other times as specified in the
+      description of a schema node using this type.  If such
+      other times can occur, for example, the creation of
+      a schema node of type counter32 at times other than
+      re-initialization, then a corresponding schema node
+      should be defined, with an appropriate type, to indicate
+      the last discontinuity.
+
+      The counter32 type should not be used for configuration
+      schema nodes.  A default statement SHOULD NOT be used in
+      combination with the type counter32.
+
+      In the value set and its semantics, this type is equivalent
+      to the Counter32 type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef zero-based-counter32 {
+    type yang:counter32;
+    default "0";
+    description
+     "The zero-based-counter32 type represents a counter32
+      that has the defined 'initial' value zero.
+
+      A schema node of this type will be set to zero (0) on creation
+      and will thereafter increase monotonically until it reaches
+      a maximum value of 2^32-1 (4294967295 decimal), when it
+      wraps around and starts increasing again from zero.
+
+      Provided that an application discovers a new schema node
+      of this type within the minimum time to wrap, it can use the
+      'initial' value as a delta.  It is important for a management
+      station to be aware of this minimum time and the actual time
+      between polls, and to discard data if the actual time is too
+      long or there is no defined minimum time.
+
+      In the value set and its semantics, this type is equivalent
+      to the ZeroBasedCounter32 textual convention of the SMIv2.";
+    reference
+      "RFC 4502: Remote Network Monitoring Management Information
+                 Base Version 2";
+  }
+
+  typedef counter64 {
+    type uint64;
+    description
+     "The counter64 type represents a non-negative integer
+      that monotonically increases until it reaches a
+      maximum value of 2^64-1 (18446744073709551615 decimal),
+      when it wraps around and starts increasing again from zero.
+
+      Counters have no defined 'initial' value, and thus, a
+      single value of a counter has (in general) no information
+      content.  Discontinuities in the monotonically increasing
+      value normally occur at re-initialization of the
+      management system, and at other times as specified in the
+      description of a schema node using this type.  If such
+      other times can occur, for example, the creation of
+      a schema node of type counter64 at times other than
+      re-initialization, then a corresponding schema node
+      should be defined, with an appropriate type, to indicate
+      the last discontinuity.
+
+      The counter64 type should not be used for configuration
+      schema nodes.  A default statement SHOULD NOT be used in
+      combination with the type counter64.
+
+      In the value set and its semantics, this type is equivalent
+      to the Counter64 type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef zero-based-counter64 {
+    type yang:counter64;
+    default "0";
+    description
+     "The zero-based-counter64 type represents a counter64 that
+      has the defined 'initial' value zero.
+
+      A schema node of this type will be set to zero (0) on creation
+      and will thereafter increase monotonically until it reaches
+      a maximum value of 2^64-1 (18446744073709551615 decimal),
+      when it wraps around and starts increasing again from zero.
+
+      Provided that an application discovers a new schema node
+      of this type within the minimum time to wrap, it can use the
+      'initial' value as a delta.  It is important for a management
+      station to be aware of this minimum time and the actual time
+      between polls, and to discard data if the actual time is too
+      long or there is no defined minimum time.
+
+      In the value set and its semantics, this type is equivalent
+      to the ZeroBasedCounter64 textual convention of the SMIv2.";
+    reference
+     "RFC 2856: Textual Conventions for Additional High Capacity
+                Data Types";
+  }
+
+  typedef gauge32 {
+    type uint32;
+    description
+     "The gauge32 type represents a non-negative integer, which
+      may increase or decrease, but shall never exceed a maximum
+      value, nor fall below a minimum value.  The maximum value
+      cannot be greater than 2^32-1 (4294967295 decimal), and
+      the minimum value cannot be smaller than 0.  The value of
+      a gauge32 has its maximum value whenever the information
+      being modeled is greater than or equal to its maximum
+      value, and has its minimum value whenever the information
+      being modeled is smaller than or equal to its minimum value.
+      If the information being modeled subsequently decreases
+      below (increases above) the maximum (minimum) value, the
+      gauge32 also decreases (increases).
+
+      In the value set and its semantics, this type is equivalent
+      to the Gauge32 type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef gauge64 {
+    type uint64;
+    description
+     "The gauge64 type represents a non-negative integer, which
+      may increase or decrease, but shall never exceed a maximum
+      value, nor fall below a minimum value.  The maximum value
+      cannot be greater than 2^64-1 (18446744073709551615), and
+      the minimum value cannot be smaller than 0.  The value of
+      a gauge64 has its maximum value whenever the information
+      being modeled is greater than or equal to its maximum
+      value, and has its minimum value whenever the information
+      being modeled is smaller than or equal to its minimum value.
+      If the information being modeled subsequently decreases
+      below (increases above) the maximum (minimum) value, the
+      gauge64 also decreases (increases).
+
+      In the value set and its semantics, this type is equivalent
+      to the CounterBasedGauge64 SMIv2 textual convention defined
+      in RFC 2856";
+    reference
+     "RFC 2856: Textual Conventions for Additional High Capacity
+                Data Types";
+  }
+
+  /*** collection of identifier-related types ***/
+
+  typedef object-identifier {
+    type string {
+      pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))'
+            + '(\.(0|([1-9]\d*)))*';
+    }
+    description
+     "The object-identifier type represents administratively
+      assigned names in a registration-hierarchical-name tree.
+
+      Values of this type are denoted as a sequence of numerical
+      non-negative sub-identifier values.  Each sub-identifier
+      value MUST NOT exceed 2^32-1 (4294967295).  Sub-identifiers
+      are separated by single dots and without any intermediate
+      whitespace.
+
+      The ASN.1 standard restricts the value space of the first
+      sub-identifier to 0, 1, or 2.  Furthermore, the value space
+      of the second sub-identifier is restricted to the range
+      0 to 39 if the first sub-identifier is 0 or 1.  Finally,
+      the ASN.1 standard requires that an object identifier
+      has always at least two sub-identifiers.  The pattern
+      captures these restrictions.
+
+      Although the number of sub-identifiers is not limited,
+      module designers should realize that there may be
+      implementations that stick with the SMIv2 limit of 128
+      sub-identifiers.
+
+      This type is a superset of the SMIv2 OBJECT IDENTIFIER type
+      since it is not restricted to 128 sub-identifiers.  Hence,
+      this type SHOULD NOT be used to represent the SMIv2 OBJECT
+      IDENTIFIER type; the object-identifier-128 type SHOULD be
+      used instead.";
+    reference
+     "ISO9834-1: Information technology -- Open Systems
+      Interconnection -- Procedures for the operation of OSI
+      Registration Authorities: General procedures and top
+      arcs of the ASN.1 Object Identifier tree";
+  }
+
+  typedef object-identifier-128 {
+    type object-identifier {
+      pattern '\d*(\.\d*){1,127}';
+    }
+    description
+     "This type represents object-identifiers restricted to 128
+      sub-identifiers.
+
+      In the value set and its semantics, this type is equivalent
+      to the OBJECT IDENTIFIER type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef yang-identifier {
+    type string {
+      length "1..max";
+      pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*';
+      pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*';
+    }
+    description
+      "A YANG identifier string as defined by the 'identifier'
+       rule in Section 12 of RFC 6020.  An identifier must
+       start with an alphabetic character or an underscore
+       followed by an arbitrary sequence of alphabetic or
+       numeric characters, underscores, hyphens, or dots.
+
+       A YANG identifier MUST NOT start with any possible
+       combination of the lowercase or uppercase character
+       sequence 'xml'.";
+    reference
+      "RFC 6020: YANG - A Data Modeling Language for the Network
+                 Configuration Protocol (NETCONF)";
+  }
+
+  /*** collection of types related to date and time***/
+
+  typedef date-and-time {
+    type string {
+      pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?'
+            + '(Z|[\+\-]\d{2}:\d{2})';
+    }
+    description
+     "The date-and-time type is a profile of the ISO 8601
+      standard for representation of dates and times using the
+      Gregorian calendar.  The profile is defined by the
+      date-time production in Section 5.6 of RFC 3339.
+
+      The date-and-time type is compatible with the dateTime XML
+      schema type with the following notable exceptions:
+
+      (a) The date-and-time type does not allow negative years.
+
+      (b) The date-and-time time-offset -00:00 indicates an unknown
+          time zone (see RFC 3339) while -00:00 and +00:00 and Z
+          all represent the same time zone in dateTime.
+
+      (c) The canonical format (see below) of data-and-time values
+          differs from the canonical format used by the dateTime XML
+          schema type, which requires all times to be in UTC using
+          the time-offset 'Z'.
+
+      This type is not equivalent to the DateAndTime textual
+      convention of the SMIv2 since RFC 3339 uses a different
+      separator between full-date and full-time and provides
+      higher resolution of time-secfrac.
+
+      The canonical format for date-and-time values with a known time
+      zone uses a numeric time zone offset that is calculated using
+      the device's configured known offset to UTC time.  A change of
+      the device's offset to UTC time will cause date-and-time values
+      to change accordingly.  Such changes might happen periodically
+      in case a server follows automatically daylight saving time
+      (DST) time zone offset changes.  The canonical format for
+      date-and-time values with an unknown time zone (usually
+      referring to the notion of local time) uses the time-offset
+      -00:00.";
+    reference
+     "RFC 3339: Date and Time on the Internet: Timestamps
+      RFC 2579: Textual Conventions for SMIv2
+      XSD-TYPES: XML Schema Part 2: Datatypes Second Edition";
+  }
+
+  typedef timeticks {
+    type uint32;
+    description
+     "The timeticks type represents a non-negative integer that
+      represents the time, modulo 2^32 (4294967296 decimal), in
+      hundredths of a second between two epochs.  When a schema
+      node is defined that uses this type, the description of
+      the schema node identifies both of the reference epochs.
+
+      In the value set and its semantics, this type is equivalent
+      to the TimeTicks type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef timestamp {
+    type yang:timeticks;
+    description
+     "The timestamp type represents the value of an associated
+      timeticks schema node at which a specific occurrence
+      happened.  The specific occurrence must be defined in the
+      description of any schema node defined using this type.  When
+      the specific occurrence occurred prior to the last time the
+      associated timeticks attribute was zero, then the timestamp
+      value is zero.  Note that this requires all timestamp values
+      to be reset to zero when the value of the associated timeticks
+      attribute reaches 497+ days and wraps around to zero.
+
+      The associated timeticks schema node must be specified
+      in the description of any schema node using this type.
+
+      In the value set and its semantics, this type is equivalent
+      to the TimeStamp textual convention of the SMIv2.";
+    reference
+     "RFC 2579: Textual Conventions for SMIv2";
+  }
+
+  /*** collection of generic address types ***/
+
+  typedef phys-address {
+    type string {
+      pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?';
+    }
+
+    description
+     "Represents media- or physical-level addresses represented
+      as a sequence octets, each octet represented by two hexadecimal
+      numbers.  Octets are separated by colons.  The canonical
+      representation uses lowercase characters.
+
+      In the value set and its semantics, this type is equivalent
+      to the PhysAddress textual convention of the SMIv2.";
+    reference
+     "RFC 2579: Textual Conventions for SMIv2";
+  }
+
+  typedef mac-address {
+    type string {
+      pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}';
+    }
+    description
+     "The mac-address type represents an IEEE 802 MAC address.
+      The canonical representation uses lowercase characters.
+
+      In the value set and its semantics, this type is equivalent
+      to the MacAddress textual convention of the SMIv2.";
+    reference
+     "IEEE 802: IEEE Standard for Local and Metropolitan Area
+                Networks: Overview and Architecture
+      RFC 2579: Textual Conventions for SMIv2";
+  }
+
+  /*** collection of XML-specific types ***/
+
+  typedef xpath1.0 {
+    type string;
+    description
+     "This type represents an XPATH 1.0 expression.
+
+      When a schema node is defined that uses this type, the
+      description of the schema node MUST specify the XPath
+      context in which the XPath expression is evaluated.";
+    reference
+     "XPATH: XML Path Language (XPath) Version 1.0";
+  }
+
+  /*** collection of string types ***/
+
+  typedef hex-string {
+    type string {
+      pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?';
+    }
+    description
+     "A hexadecimal string with octets represented as hex digits
+      separated by colons.  The canonical representation uses
+      lowercase characters.";
+  }
+
+  typedef uuid {
+    type string {
+      pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-'
+            + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}';
+    }
+    description
+     "A Universally Unique IDentifier in the string representation
+      defined in RFC 4122.  The canonical representation uses
+      lowercase characters.
+
+      The following is an example of a UUID in string representation:
+      f81d4fae-7dec-11d0-a765-00a0c91e6bf6
+      ";
+    reference
+     "RFC 4122: A Universally Unique IDentifier (UUID) URN
+                Namespace";
+  }
+
+  typedef dotted-quad {
+    type string {
+      pattern
+        '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+      + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])';
+    }
+    description
+      "An unsigned 32-bit number expressed in the dotted-quad
+       notation, i.e., four octets written as decimal numbers
+       and separated with the '.' (full stop) character.";
+  }
+}
diff --git a/src/device/tests/qkd/unit/LICENSE b/src/device/tests/qkd/unit/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6b678c507101c0682cfcd340bc97522ccabe7e4d
--- /dev/null
+++ b/src/device/tests/qkd/unit/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 FullStory, Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/src/device/tests/qkd/unit/generated/context_pb2.py b/src/device/tests/qkd/unit/generated/context_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..89c70075bbe224d7f5f2a47578a9775537985c51
--- /dev/null
+++ b/src/device/tests/qkd/unit/generated/context_pb2.py
@@ -0,0 +1,994 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: context.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import acl_pb2 as acl__pb2
+import kpi_sample_types_pb2 as kpi__sample__types__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rcontext.proto\x12\x07\x63ontext\x1a\tacl.proto\x1a\x16kpi_sample_types.proto\"\x07\n\x05\x45mpty\"\x14\n\x04Uuid\x12\x0c\n\x04uuid\x18\x01 \x01(\t\"\x1e\n\tTimestamp\x12\x11\n\ttimestamp\x18\x01 \x01(\x01\"Z\n\x05\x45vent\x12%\n\ttimestamp\x18\x01 \x01(\x0b\x32\x12.context.Timestamp\x12*\n\nevent_type\x18\x02 \x01(\x0e\x32\x16.context.EventTypeEnum\"0\n\tContextId\x12#\n\x0c\x63ontext_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\"\xe9\x01\n\x07\x43ontext\x12&\n\ncontext_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12)\n\x0ctopology_ids\x18\x03 \x03(\x0b\x32\x13.context.TopologyId\x12\'\n\x0bservice_ids\x18\x04 \x03(\x0b\x32\x12.context.ServiceId\x12#\n\tslice_ids\x18\x05 \x03(\x0b\x32\x10.context.SliceId\x12/\n\ncontroller\x18\x06 \x01(\x0b\x32\x1b.context.TeraFlowController\"8\n\rContextIdList\x12\'\n\x0b\x63ontext_ids\x18\x01 \x03(\x0b\x32\x12.context.ContextId\"1\n\x0b\x43ontextList\x12\"\n\x08\x63ontexts\x18\x01 \x03(\x0b\x32\x10.context.Context\"U\n\x0c\x43ontextEvent\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.context.Event\x12&\n\ncontext_id\x18\x02 \x01(\x0b\x32\x12.context.ContextId\"Z\n\nTopologyId\x12&\n\ncontext_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12$\n\rtopology_uuid\x18\x02 \x01(\x0b\x32\r.context.Uuid\"\x8c\x01\n\x08Topology\x12(\n\x0btopology_id\x18\x01 \x01(\x0b\x32\x13.context.TopologyId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12%\n\ndevice_ids\x18\x03 \x03(\x0b\x32\x11.context.DeviceId\x12!\n\x08link_ids\x18\x04 \x03(\x0b\x32\x0f.context.LinkId\"\x89\x01\n\x0fTopologyDetails\x12(\n\x0btopology_id\x18\x01 \x01(\x0b\x32\x13.context.TopologyId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12 \n\x07\x64\x65vices\x18\x03 \x03(\x0b\x32\x0f.context.Device\x12\x1c\n\x05links\x18\x04 \x03(\x0b\x32\r.context.Link\";\n\x0eTopologyIdList\x12)\n\x0ctopology_ids\x18\x01 \x03(\x0b\x32\x13.context.TopologyId\"5\n\x0cTopologyList\x12%\n\ntopologies\x18\x01 \x03(\x0b\x32\x11.context.Topology\"X\n\rTopologyEvent\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.context.Event\x12(\n\x0btopology_id\x18\x02 \x01(\x0b\x32\x13.context.TopologyId\".\n\x08\x44\x65viceId\x12\"\n\x0b\x64\x65vice_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\"\xfa\x02\n\x06\x44\x65vice\x12$\n\tdevice_id\x18\x01 \x01(\x0b\x32\x11.context.DeviceId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65vice_type\x18\x03 \x01(\t\x12,\n\rdevice_config\x18\x04 \x01(\x0b\x32\x15.context.DeviceConfig\x12G\n\x19\x64\x65vice_operational_status\x18\x05 \x01(\x0e\x32$.context.DeviceOperationalStatusEnum\x12\x31\n\x0e\x64\x65vice_drivers\x18\x06 \x03(\x0e\x32\x19.context.DeviceDriverEnum\x12+\n\x10\x64\x65vice_endpoints\x18\x07 \x03(\x0b\x32\x11.context.EndPoint\x12&\n\ncomponents\x18\x08 \x03(\x0b\x32\x12.context.Component\x12(\n\rcontroller_id\x18\t \x01(\x0b\x32\x11.context.DeviceId\"\xc9\x01\n\tComponent\x12%\n\x0e\x63omponent_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x36\n\nattributes\x18\x04 \x03(\x0b\x32\".context.Component.AttributesEntry\x12\x0e\n\x06parent\x18\x05 \x01(\t\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"9\n\x0c\x44\x65viceConfig\x12)\n\x0c\x63onfig_rules\x18\x01 \x03(\x0b\x32\x13.context.ConfigRule\"5\n\x0c\x44\x65viceIdList\x12%\n\ndevice_ids\x18\x01 \x03(\x0b\x32\x11.context.DeviceId\".\n\nDeviceList\x12 \n\x07\x64\x65vices\x18\x01 \x03(\x0b\x32\x0f.context.Device\"\x8e\x01\n\x0c\x44\x65viceFilter\x12)\n\ndevice_ids\x18\x01 \x01(\x0b\x32\x15.context.DeviceIdList\x12\x19\n\x11include_endpoints\x18\x02 \x01(\x08\x12\x1c\n\x14include_config_rules\x18\x03 \x01(\x08\x12\x1a\n\x12include_components\x18\x04 \x01(\x08\"\x80\x01\n\x0b\x44\x65viceEvent\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.context.Event\x12$\n\tdevice_id\x18\x02 \x01(\x0b\x32\x11.context.DeviceId\x12,\n\rdevice_config\x18\x03 \x01(\x0b\x32\x15.context.DeviceConfig\"*\n\x06LinkId\x12 \n\tlink_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\"I\n\x0eLinkAttributes\x12\x1b\n\x13total_capacity_gbps\x18\x01 \x01(\x02\x12\x1a\n\x12used_capacity_gbps\x18\x02 \x01(\x02\"\x93\x01\n\x04Link\x12 \n\x07link_id\x18\x01 \x01(\x0b\x32\x0f.context.LinkId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12.\n\x11link_endpoint_ids\x18\x03 \x03(\x0b\x32\x13.context.EndPointId\x12+\n\nattributes\x18\x04 \x01(\x0b\x32\x17.context.LinkAttributes\"/\n\nLinkIdList\x12!\n\x08link_ids\x18\x01 \x03(\x0b\x32\x0f.context.LinkId\"(\n\x08LinkList\x12\x1c\n\x05links\x18\x01 \x03(\x0b\x32\r.context.Link\"L\n\tLinkEvent\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.context.Event\x12 \n\x07link_id\x18\x02 \x01(\x0b\x32\x0f.context.LinkId\"X\n\tServiceId\x12&\n\ncontext_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12#\n\x0cservice_uuid\x18\x02 \x01(\x0b\x32\r.context.Uuid\"\xdb\x02\n\x07Service\x12&\n\nservice_id\x18\x01 \x01(\x0b\x32\x12.context.ServiceId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12.\n\x0cservice_type\x18\x03 \x01(\x0e\x32\x18.context.ServiceTypeEnum\x12\x31\n\x14service_endpoint_ids\x18\x04 \x03(\x0b\x32\x13.context.EndPointId\x12\x30\n\x13service_constraints\x18\x05 \x03(\x0b\x32\x13.context.Constraint\x12.\n\x0eservice_status\x18\x06 \x01(\x0b\x32\x16.context.ServiceStatus\x12.\n\x0eservice_config\x18\x07 \x01(\x0b\x32\x16.context.ServiceConfig\x12%\n\ttimestamp\x18\x08 \x01(\x0b\x32\x12.context.Timestamp\"C\n\rServiceStatus\x12\x32\n\x0eservice_status\x18\x01 \x01(\x0e\x32\x1a.context.ServiceStatusEnum\":\n\rServiceConfig\x12)\n\x0c\x63onfig_rules\x18\x01 \x03(\x0b\x32\x13.context.ConfigRule\"8\n\rServiceIdList\x12\'\n\x0bservice_ids\x18\x01 \x03(\x0b\x32\x12.context.ServiceId\"1\n\x0bServiceList\x12\"\n\x08services\x18\x01 \x03(\x0b\x32\x10.context.Service\"\x95\x01\n\rServiceFilter\x12+\n\x0bservice_ids\x18\x01 \x01(\x0b\x32\x16.context.ServiceIdList\x12\x1c\n\x14include_endpoint_ids\x18\x02 \x01(\x08\x12\x1b\n\x13include_constraints\x18\x03 \x01(\x08\x12\x1c\n\x14include_config_rules\x18\x04 \x01(\x08\"U\n\x0cServiceEvent\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.context.Event\x12&\n\nservice_id\x18\x02 \x01(\x0b\x32\x12.context.ServiceId\"T\n\x07SliceId\x12&\n\ncontext_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12!\n\nslice_uuid\x18\x02 \x01(\x0b\x32\r.context.Uuid\"\xa0\x03\n\x05Slice\x12\"\n\x08slice_id\x18\x01 \x01(\x0b\x32\x10.context.SliceId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\x12slice_endpoint_ids\x18\x03 \x03(\x0b\x32\x13.context.EndPointId\x12.\n\x11slice_constraints\x18\x04 \x03(\x0b\x32\x13.context.Constraint\x12-\n\x11slice_service_ids\x18\x05 \x03(\x0b\x32\x12.context.ServiceId\x12,\n\x12slice_subslice_ids\x18\x06 \x03(\x0b\x32\x10.context.SliceId\x12*\n\x0cslice_status\x18\x07 \x01(\x0b\x32\x14.context.SliceStatus\x12*\n\x0cslice_config\x18\x08 \x01(\x0b\x32\x14.context.SliceConfig\x12(\n\x0bslice_owner\x18\t \x01(\x0b\x32\x13.context.SliceOwner\x12%\n\ttimestamp\x18\n \x01(\x0b\x32\x12.context.Timestamp\"E\n\nSliceOwner\x12!\n\nowner_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\x12\x14\n\x0cowner_string\x18\x02 \x01(\t\"=\n\x0bSliceStatus\x12.\n\x0cslice_status\x18\x01 \x01(\x0e\x32\x18.context.SliceStatusEnum\"8\n\x0bSliceConfig\x12)\n\x0c\x63onfig_rules\x18\x01 \x03(\x0b\x32\x13.context.ConfigRule\"2\n\x0bSliceIdList\x12#\n\tslice_ids\x18\x01 \x03(\x0b\x32\x10.context.SliceId\"+\n\tSliceList\x12\x1e\n\x06slices\x18\x01 \x03(\x0b\x32\x0e.context.Slice\"\xca\x01\n\x0bSliceFilter\x12\'\n\tslice_ids\x18\x01 \x01(\x0b\x32\x14.context.SliceIdList\x12\x1c\n\x14include_endpoint_ids\x18\x02 \x01(\x08\x12\x1b\n\x13include_constraints\x18\x03 \x01(\x08\x12\x1b\n\x13include_service_ids\x18\x04 \x01(\x08\x12\x1c\n\x14include_subslice_ids\x18\x05 \x01(\x08\x12\x1c\n\x14include_config_rules\x18\x06 \x01(\x08\"O\n\nSliceEvent\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.context.Event\x12\"\n\x08slice_id\x18\x02 \x01(\x0b\x32\x10.context.SliceId\"6\n\x0c\x43onnectionId\x12&\n\x0f\x63onnection_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\"2\n\x15\x43onnectionSettings_L0\x12\x19\n\x11lsp_symbolic_name\x18\x01 \x01(\t\"\x9e\x01\n\x15\x43onnectionSettings_L2\x12\x17\n\x0fsrc_mac_address\x18\x01 \x01(\t\x12\x17\n\x0f\x64st_mac_address\x18\x02 \x01(\t\x12\x12\n\nether_type\x18\x03 \x01(\r\x12\x0f\n\x07vlan_id\x18\x04 \x01(\r\x12\x12\n\nmpls_label\x18\x05 \x01(\r\x12\x1a\n\x12mpls_traffic_class\x18\x06 \x01(\r\"t\n\x15\x43onnectionSettings_L3\x12\x16\n\x0esrc_ip_address\x18\x01 \x01(\t\x12\x16\n\x0e\x64st_ip_address\x18\x02 \x01(\t\x12\x0c\n\x04\x64scp\x18\x03 \x01(\r\x12\x10\n\x08protocol\x18\x04 \x01(\r\x12\x0b\n\x03ttl\x18\x05 \x01(\r\"[\n\x15\x43onnectionSettings_L4\x12\x10\n\x08src_port\x18\x01 \x01(\r\x12\x10\n\x08\x64st_port\x18\x02 \x01(\r\x12\x11\n\ttcp_flags\x18\x03 \x01(\r\x12\x0b\n\x03ttl\x18\x04 \x01(\r\"\xc4\x01\n\x12\x43onnectionSettings\x12*\n\x02l0\x18\x01 \x01(\x0b\x32\x1e.context.ConnectionSettings_L0\x12*\n\x02l2\x18\x02 \x01(\x0b\x32\x1e.context.ConnectionSettings_L2\x12*\n\x02l3\x18\x03 \x01(\x0b\x32\x1e.context.ConnectionSettings_L3\x12*\n\x02l4\x18\x04 \x01(\x0b\x32\x1e.context.ConnectionSettings_L4\"\xf3\x01\n\nConnection\x12,\n\rconnection_id\x18\x01 \x01(\x0b\x32\x15.context.ConnectionId\x12&\n\nservice_id\x18\x02 \x01(\x0b\x32\x12.context.ServiceId\x12\x33\n\x16path_hops_endpoint_ids\x18\x03 \x03(\x0b\x32\x13.context.EndPointId\x12+\n\x0fsub_service_ids\x18\x04 \x03(\x0b\x32\x12.context.ServiceId\x12-\n\x08settings\x18\x05 \x01(\x0b\x32\x1b.context.ConnectionSettings\"A\n\x10\x43onnectionIdList\x12-\n\x0e\x63onnection_ids\x18\x01 \x03(\x0b\x32\x15.context.ConnectionId\":\n\x0e\x43onnectionList\x12(\n\x0b\x63onnections\x18\x01 \x03(\x0b\x32\x13.context.Connection\"^\n\x0f\x43onnectionEvent\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.context.Event\x12,\n\rconnection_id\x18\x02 \x01(\x0b\x32\x15.context.ConnectionId\"\x82\x01\n\nEndPointId\x12(\n\x0btopology_id\x18\x01 \x01(\x0b\x32\x13.context.TopologyId\x12$\n\tdevice_id\x18\x02 \x01(\x0b\x32\x11.context.DeviceId\x12$\n\rendpoint_uuid\x18\x03 \x01(\x0b\x32\r.context.Uuid\"\xc2\x01\n\x08\x45ndPoint\x12(\n\x0b\x65ndpoint_id\x18\x01 \x01(\x0b\x32\x13.context.EndPointId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x15\n\rendpoint_type\x18\x03 \x01(\t\x12\x39\n\x10kpi_sample_types\x18\x04 \x03(\x0e\x32\x1f.kpi_sample_types.KpiSampleType\x12,\n\x11\x65ndpoint_location\x18\x05 \x01(\x0b\x32\x11.context.Location\"{\n\x0c\x45ndPointName\x12(\n\x0b\x65ndpoint_id\x18\x01 \x01(\x0b\x32\x13.context.EndPointId\x12\x13\n\x0b\x64\x65vice_name\x18\x02 \x01(\t\x12\x15\n\rendpoint_name\x18\x03 \x01(\t\x12\x15\n\rendpoint_type\x18\x04 \x01(\t\";\n\x0e\x45ndPointIdList\x12)\n\x0c\x65ndpoint_ids\x18\x01 \x03(\x0b\x32\x13.context.EndPointId\"A\n\x10\x45ndPointNameList\x12-\n\x0e\x65ndpoint_names\x18\x01 \x03(\x0b\x32\x15.context.EndPointName\"A\n\x11\x43onfigRule_Custom\x12\x14\n\x0cresource_key\x18\x01 \x01(\t\x12\x16\n\x0eresource_value\x18\x02 \x01(\t\"]\n\x0e\x43onfigRule_ACL\x12(\n\x0b\x65ndpoint_id\x18\x01 \x01(\x0b\x32\x13.context.EndPointId\x12!\n\x08rule_set\x18\x02 \x01(\x0b\x32\x0f.acl.AclRuleSet\"\x9c\x01\n\nConfigRule\x12)\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x19.context.ConfigActionEnum\x12,\n\x06\x63ustom\x18\x02 \x01(\x0b\x32\x1a.context.ConfigRule_CustomH\x00\x12&\n\x03\x61\x63l\x18\x03 \x01(\x0b\x32\x17.context.ConfigRule_ACLH\x00\x42\r\n\x0b\x63onfig_rule\"F\n\x11\x43onstraint_Custom\x12\x17\n\x0f\x63onstraint_type\x18\x01 \x01(\t\x12\x18\n\x10\x63onstraint_value\x18\x02 \x01(\t\"E\n\x13\x43onstraint_Schedule\x12\x17\n\x0fstart_timestamp\x18\x01 \x01(\x02\x12\x15\n\rduration_days\x18\x02 \x01(\x02\"3\n\x0cGPS_Position\x12\x10\n\x08latitude\x18\x01 \x01(\x02\x12\x11\n\tlongitude\x18\x02 \x01(\x02\"W\n\x08Location\x12\x10\n\x06region\x18\x01 \x01(\tH\x00\x12-\n\x0cgps_position\x18\x02 \x01(\x0b\x32\x15.context.GPS_PositionH\x00\x42\n\n\x08location\"l\n\x1b\x43onstraint_EndPointLocation\x12(\n\x0b\x65ndpoint_id\x18\x01 \x01(\x0b\x32\x13.context.EndPointId\x12#\n\x08location\x18\x02 \x01(\x0b\x32\x11.context.Location\"Y\n\x1b\x43onstraint_EndPointPriority\x12(\n\x0b\x65ndpoint_id\x18\x01 \x01(\x0b\x32\x13.context.EndPointId\x12\x10\n\x08priority\x18\x02 \x01(\r\"0\n\x16\x43onstraint_SLA_Latency\x12\x16\n\x0e\x65\x32\x65_latency_ms\x18\x01 \x01(\x02\"0\n\x17\x43onstraint_SLA_Capacity\x12\x15\n\rcapacity_gbps\x18\x01 \x01(\x02\"c\n\x1b\x43onstraint_SLA_Availability\x12\x1a\n\x12num_disjoint_paths\x18\x01 \x01(\r\x12\x12\n\nall_active\x18\x02 \x01(\x08\x12\x14\n\x0c\x61vailability\x18\x03 \x01(\x02\"V\n\x1e\x43onstraint_SLA_Isolation_level\x12\x34\n\x0fisolation_level\x18\x01 \x03(\x0e\x32\x1b.context.IsolationLevelEnum\"\xa2\x01\n\x15\x43onstraint_Exclusions\x12\x14\n\x0cis_permanent\x18\x01 \x01(\x08\x12%\n\ndevice_ids\x18\x02 \x03(\x0b\x32\x11.context.DeviceId\x12)\n\x0c\x65ndpoint_ids\x18\x03 \x03(\x0b\x32\x13.context.EndPointId\x12!\n\x08link_ids\x18\x04 \x03(\x0b\x32\x0f.context.LinkId\"\xdb\x04\n\nConstraint\x12-\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x1d.context.ConstraintActionEnum\x12,\n\x06\x63ustom\x18\x02 \x01(\x0b\x32\x1a.context.Constraint_CustomH\x00\x12\x30\n\x08schedule\x18\x03 \x01(\x0b\x32\x1c.context.Constraint_ScheduleH\x00\x12\x41\n\x11\x65ndpoint_location\x18\x04 \x01(\x0b\x32$.context.Constraint_EndPointLocationH\x00\x12\x41\n\x11\x65ndpoint_priority\x18\x05 \x01(\x0b\x32$.context.Constraint_EndPointPriorityH\x00\x12\x38\n\x0csla_capacity\x18\x06 \x01(\x0b\x32 .context.Constraint_SLA_CapacityH\x00\x12\x36\n\x0bsla_latency\x18\x07 \x01(\x0b\x32\x1f.context.Constraint_SLA_LatencyH\x00\x12@\n\x10sla_availability\x18\x08 \x01(\x0b\x32$.context.Constraint_SLA_AvailabilityH\x00\x12@\n\rsla_isolation\x18\t \x01(\x0b\x32\'.context.Constraint_SLA_Isolation_levelH\x00\x12\x34\n\nexclusions\x18\n \x01(\x0b\x32\x1e.context.Constraint_ExclusionsH\x00\x42\x0c\n\nconstraint\"^\n\x12TeraFlowController\x12&\n\ncontext_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12\x12\n\nip_address\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\"U\n\x14\x41uthenticationResult\x12&\n\ncontext_id\x18\x01 \x01(\x0b\x32\x12.context.ContextId\x12\x15\n\rauthenticated\x18\x02 \x01(\x08\"-\n\x0fOpticalConfigId\x12\x1a\n\x12opticalconfig_uuid\x18\x01 \x01(\t\"S\n\rOpticalConfig\x12\x32\n\x10opticalconfig_id\x18\x01 \x01(\x0b\x32\x18.context.OpticalConfigId\x12\x0e\n\x06\x63onfig\x18\x02 \x01(\t\"C\n\x11OpticalConfigList\x12.\n\x0eopticalconfigs\x18\x01 \x03(\x0b\x32\x16.context.OpticalConfig\"9\n\rOpticalLinkId\x12(\n\x11optical_link_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\",\n\x07\x46iberId\x12!\n\nfiber_uuid\x18\x01 \x01(\x0b\x32\r.context.Uuid\"\xe1\x01\n\x05\x46iber\x12\n\n\x02ID\x18\n \x01(\t\x12\x10\n\x08src_port\x18\x01 \x01(\t\x12\x10\n\x08\x64st_port\x18\x02 \x01(\t\x12\x17\n\x0flocal_peer_port\x18\x03 \x01(\t\x12\x18\n\x10remote_peer_port\x18\x04 \x01(\t\x12\x0f\n\x07\x63_slots\x18\x05 \x03(\x05\x12\x0f\n\x07l_slots\x18\x06 \x03(\x05\x12\x0f\n\x07s_slots\x18\x07 \x03(\x05\x12\x0e\n\x06length\x18\x08 \x01(\x02\x12\x0c\n\x04used\x18\t \x01(\x08\x12$\n\nfiber_uuid\x18\x0b \x01(\x0b\x32\x10.context.FiberId\"d\n\x12OpticalLinkDetails\x12\x0e\n\x06length\x18\x01 \x01(\x02\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x0e\n\x06target\x18\x03 \x01(\t\x12\x1e\n\x06\x66ibers\x18\x04 \x03(\x0b\x32\x0e.context.Fiber\"|\n\x0bOpticalLink\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32\x1b.context.OpticalLinkDetails\x12\x31\n\x11optical_link_uuid\x18\x03 \x01(\x0b\x32\x16.context.OpticalLinkId*j\n\rEventTypeEnum\x12\x17\n\x13\x45VENTTYPE_UNDEFINED\x10\x00\x12\x14\n\x10\x45VENTTYPE_CREATE\x10\x01\x12\x14\n\x10\x45VENTTYPE_UPDATE\x10\x02\x12\x14\n\x10\x45VENTTYPE_REMOVE\x10\x03*\xfe\x02\n\x10\x44\x65viceDriverEnum\x12\x1a\n\x16\x44\x45VICEDRIVER_UNDEFINED\x10\x00\x12\x1b\n\x17\x44\x45VICEDRIVER_OPENCONFIG\x10\x01\x12\x1e\n\x1a\x44\x45VICEDRIVER_TRANSPORT_API\x10\x02\x12\x13\n\x0f\x44\x45VICEDRIVER_P4\x10\x03\x12&\n\"DEVICEDRIVER_IETF_NETWORK_TOPOLOGY\x10\x04\x12\x1b\n\x17\x44\x45VICEDRIVER_ONF_TR_532\x10\x05\x12\x13\n\x0f\x44\x45VICEDRIVER_XR\x10\x06\x12\x1b\n\x17\x44\x45VICEDRIVER_IETF_L2VPN\x10\x07\x12 \n\x1c\x44\x45VICEDRIVER_GNMI_OPENCONFIG\x10\x08\x12\x1c\n\x18\x44\x45VICEDRIVER_OPTICAL_TFS\x10\t\x12\x1a\n\x16\x44\x45VICEDRIVER_IETF_ACTN\x10\n\x12\x13\n\x0f\x44\x45VICEDRIVER_OC\x10\x0b\x12\x14\n\x10\x44\x45VICEDRIVER_QKD\x10\x0c*\x8f\x01\n\x1b\x44\x65viceOperationalStatusEnum\x12%\n!DEVICEOPERATIONALSTATUS_UNDEFINED\x10\x00\x12$\n DEVICEOPERATIONALSTATUS_DISABLED\x10\x01\x12#\n\x1f\x44\x45VICEOPERATIONALSTATUS_ENABLED\x10\x02*\xe5\x01\n\x0fServiceTypeEnum\x12\x17\n\x13SERVICETYPE_UNKNOWN\x10\x00\x12\x14\n\x10SERVICETYPE_L3NM\x10\x01\x12\x14\n\x10SERVICETYPE_L2NM\x10\x02\x12)\n%SERVICETYPE_TAPI_CONNECTIVITY_SERVICE\x10\x03\x12\x12\n\x0eSERVICETYPE_TE\x10\x04\x12\x13\n\x0fSERVICETYPE_E2E\x10\x05\x12$\n SERVICETYPE_OPTICAL_CONNECTIVITY\x10\x06\x12\x13\n\x0fSERVICETYPE_QKD\x10\x07*\xc4\x01\n\x11ServiceStatusEnum\x12\x1b\n\x17SERVICESTATUS_UNDEFINED\x10\x00\x12\x19\n\x15SERVICESTATUS_PLANNED\x10\x01\x12\x18\n\x14SERVICESTATUS_ACTIVE\x10\x02\x12\x1a\n\x16SERVICESTATUS_UPDATING\x10\x03\x12!\n\x1dSERVICESTATUS_PENDING_REMOVAL\x10\x04\x12\x1e\n\x1aSERVICESTATUS_SLA_VIOLATED\x10\x05*\xa9\x01\n\x0fSliceStatusEnum\x12\x19\n\x15SLICESTATUS_UNDEFINED\x10\x00\x12\x17\n\x13SLICESTATUS_PLANNED\x10\x01\x12\x14\n\x10SLICESTATUS_INIT\x10\x02\x12\x16\n\x12SLICESTATUS_ACTIVE\x10\x03\x12\x16\n\x12SLICESTATUS_DEINIT\x10\x04\x12\x1c\n\x18SLICESTATUS_SLA_VIOLATED\x10\x05*]\n\x10\x43onfigActionEnum\x12\x1a\n\x16\x43ONFIGACTION_UNDEFINED\x10\x00\x12\x14\n\x10\x43ONFIGACTION_SET\x10\x01\x12\x17\n\x13\x43ONFIGACTION_DELETE\x10\x02*m\n\x14\x43onstraintActionEnum\x12\x1e\n\x1a\x43ONSTRAINTACTION_UNDEFINED\x10\x00\x12\x18\n\x14\x43ONSTRAINTACTION_SET\x10\x01\x12\x1b\n\x17\x43ONSTRAINTACTION_DELETE\x10\x02*\x83\x02\n\x12IsolationLevelEnum\x12\x10\n\x0cNO_ISOLATION\x10\x00\x12\x16\n\x12PHYSICAL_ISOLATION\x10\x01\x12\x15\n\x11LOGICAL_ISOLATION\x10\x02\x12\x15\n\x11PROCESS_ISOLATION\x10\x03\x12\x1d\n\x19PHYSICAL_MEMORY_ISOLATION\x10\x04\x12\x1e\n\x1aPHYSICAL_NETWORK_ISOLATION\x10\x05\x12\x1e\n\x1aVIRTUAL_RESOURCE_ISOLATION\x10\x06\x12\x1f\n\x1bNETWORK_FUNCTIONS_ISOLATION\x10\x07\x12\x15\n\x11SERVICE_ISOLATION\x10\x08\x32\xa6\x19\n\x0e\x43ontextService\x12:\n\x0eListContextIds\x12\x0e.context.Empty\x1a\x16.context.ContextIdList\"\x00\x12\x36\n\x0cListContexts\x12\x0e.context.Empty\x1a\x14.context.ContextList\"\x00\x12\x34\n\nGetContext\x12\x12.context.ContextId\x1a\x10.context.Context\"\x00\x12\x34\n\nSetContext\x12\x10.context.Context\x1a\x12.context.ContextId\"\x00\x12\x35\n\rRemoveContext\x12\x12.context.ContextId\x1a\x0e.context.Empty\"\x00\x12=\n\x10GetContextEvents\x12\x0e.context.Empty\x1a\x15.context.ContextEvent\"\x00\x30\x01\x12@\n\x0fListTopologyIds\x12\x12.context.ContextId\x1a\x17.context.TopologyIdList\"\x00\x12=\n\x0eListTopologies\x12\x12.context.ContextId\x1a\x15.context.TopologyList\"\x00\x12\x37\n\x0bGetTopology\x12\x13.context.TopologyId\x1a\x11.context.Topology\"\x00\x12\x45\n\x12GetTopologyDetails\x12\x13.context.TopologyId\x1a\x18.context.TopologyDetails\"\x00\x12\x37\n\x0bSetTopology\x12\x11.context.Topology\x1a\x13.context.TopologyId\"\x00\x12\x37\n\x0eRemoveTopology\x12\x13.context.TopologyId\x1a\x0e.context.Empty\"\x00\x12?\n\x11GetTopologyEvents\x12\x0e.context.Empty\x1a\x16.context.TopologyEvent\"\x00\x30\x01\x12\x38\n\rListDeviceIds\x12\x0e.context.Empty\x1a\x15.context.DeviceIdList\"\x00\x12\x34\n\x0bListDevices\x12\x0e.context.Empty\x1a\x13.context.DeviceList\"\x00\x12\x31\n\tGetDevice\x12\x11.context.DeviceId\x1a\x0f.context.Device\"\x00\x12\x31\n\tSetDevice\x12\x0f.context.Device\x1a\x11.context.DeviceId\"\x00\x12\x33\n\x0cRemoveDevice\x12\x11.context.DeviceId\x1a\x0e.context.Empty\"\x00\x12;\n\x0fGetDeviceEvents\x12\x0e.context.Empty\x1a\x14.context.DeviceEvent\"\x00\x30\x01\x12<\n\x0cSelectDevice\x12\x15.context.DeviceFilter\x1a\x13.context.DeviceList\"\x00\x12I\n\x11ListEndPointNames\x12\x17.context.EndPointIdList\x1a\x19.context.EndPointNameList\"\x00\x12\x34\n\x0bListLinkIds\x12\x0e.context.Empty\x1a\x13.context.LinkIdList\"\x00\x12\x30\n\tListLinks\x12\x0e.context.Empty\x1a\x11.context.LinkList\"\x00\x12+\n\x07GetLink\x12\x0f.context.LinkId\x1a\r.context.Link\"\x00\x12+\n\x07SetLink\x12\r.context.Link\x1a\x0f.context.LinkId\"\x00\x12/\n\nRemoveLink\x12\x0f.context.LinkId\x1a\x0e.context.Empty\"\x00\x12\x37\n\rGetLinkEvents\x12\x0e.context.Empty\x1a\x12.context.LinkEvent\"\x00\x30\x01\x12>\n\x0eListServiceIds\x12\x12.context.ContextId\x1a\x16.context.ServiceIdList\"\x00\x12:\n\x0cListServices\x12\x12.context.ContextId\x1a\x14.context.ServiceList\"\x00\x12\x34\n\nGetService\x12\x12.context.ServiceId\x1a\x10.context.Service\"\x00\x12\x34\n\nSetService\x12\x10.context.Service\x1a\x12.context.ServiceId\"\x00\x12\x36\n\x0cUnsetService\x12\x10.context.Service\x1a\x12.context.ServiceId\"\x00\x12\x35\n\rRemoveService\x12\x12.context.ServiceId\x1a\x0e.context.Empty\"\x00\x12=\n\x10GetServiceEvents\x12\x0e.context.Empty\x1a\x15.context.ServiceEvent\"\x00\x30\x01\x12?\n\rSelectService\x12\x16.context.ServiceFilter\x1a\x14.context.ServiceList\"\x00\x12:\n\x0cListSliceIds\x12\x12.context.ContextId\x1a\x14.context.SliceIdList\"\x00\x12\x36\n\nListSlices\x12\x12.context.ContextId\x1a\x12.context.SliceList\"\x00\x12.\n\x08GetSlice\x12\x10.context.SliceId\x1a\x0e.context.Slice\"\x00\x12.\n\x08SetSlice\x12\x0e.context.Slice\x1a\x10.context.SliceId\"\x00\x12\x30\n\nUnsetSlice\x12\x0e.context.Slice\x1a\x10.context.SliceId\"\x00\x12\x31\n\x0bRemoveSlice\x12\x10.context.SliceId\x1a\x0e.context.Empty\"\x00\x12\x39\n\x0eGetSliceEvents\x12\x0e.context.Empty\x1a\x13.context.SliceEvent\"\x00\x30\x01\x12\x39\n\x0bSelectSlice\x12\x14.context.SliceFilter\x1a\x12.context.SliceList\"\x00\x12\x44\n\x11ListConnectionIds\x12\x12.context.ServiceId\x1a\x19.context.ConnectionIdList\"\x00\x12@\n\x0fListConnections\x12\x12.context.ServiceId\x1a\x17.context.ConnectionList\"\x00\x12=\n\rGetConnection\x12\x15.context.ConnectionId\x1a\x13.context.Connection\"\x00\x12=\n\rSetConnection\x12\x13.context.Connection\x1a\x15.context.ConnectionId\"\x00\x12;\n\x10RemoveConnection\x12\x15.context.ConnectionId\x1a\x0e.context.Empty\"\x00\x12\x43\n\x13GetConnectionEvents\x12\x0e.context.Empty\x1a\x18.context.ConnectionEvent\"\x00\x30\x01\x12@\n\x10GetOpticalConfig\x12\x0e.context.Empty\x1a\x1a.context.OpticalConfigList\"\x00\x12\x46\n\x10SetOpticalConfig\x12\x16.context.OpticalConfig\x1a\x18.context.OpticalConfigId\"\x00\x12I\n\x13SelectOpticalConfig\x12\x18.context.OpticalConfigId\x1a\x16.context.OpticalConfig\"\x00\x12\x38\n\x0eSetOpticalLink\x12\x14.context.OpticalLink\x1a\x0e.context.Empty\"\x00\x12@\n\x0eGetOpticalLink\x12\x16.context.OpticalLinkId\x1a\x14.context.OpticalLink\"\x00\x12.\n\x08GetFiber\x12\x10.context.FiberId\x1a\x0e.context.Fiber\"\x00\x62\x06proto3')
+
+_EVENTTYPEENUM = DESCRIPTOR.enum_types_by_name['EventTypeEnum']
+EventTypeEnum = enum_type_wrapper.EnumTypeWrapper(_EVENTTYPEENUM)
+_DEVICEDRIVERENUM = DESCRIPTOR.enum_types_by_name['DeviceDriverEnum']
+DeviceDriverEnum = enum_type_wrapper.EnumTypeWrapper(_DEVICEDRIVERENUM)
+_DEVICEOPERATIONALSTATUSENUM = DESCRIPTOR.enum_types_by_name['DeviceOperationalStatusEnum']
+DeviceOperationalStatusEnum = enum_type_wrapper.EnumTypeWrapper(_DEVICEOPERATIONALSTATUSENUM)
+_SERVICETYPEENUM = DESCRIPTOR.enum_types_by_name['ServiceTypeEnum']
+ServiceTypeEnum = enum_type_wrapper.EnumTypeWrapper(_SERVICETYPEENUM)
+_SERVICESTATUSENUM = DESCRIPTOR.enum_types_by_name['ServiceStatusEnum']
+ServiceStatusEnum = enum_type_wrapper.EnumTypeWrapper(_SERVICESTATUSENUM)
+_SLICESTATUSENUM = DESCRIPTOR.enum_types_by_name['SliceStatusEnum']
+SliceStatusEnum = enum_type_wrapper.EnumTypeWrapper(_SLICESTATUSENUM)
+_CONFIGACTIONENUM = DESCRIPTOR.enum_types_by_name['ConfigActionEnum']
+ConfigActionEnum = enum_type_wrapper.EnumTypeWrapper(_CONFIGACTIONENUM)
+_CONSTRAINTACTIONENUM = DESCRIPTOR.enum_types_by_name['ConstraintActionEnum']
+ConstraintActionEnum = enum_type_wrapper.EnumTypeWrapper(_CONSTRAINTACTIONENUM)
+_ISOLATIONLEVELENUM = DESCRIPTOR.enum_types_by_name['IsolationLevelEnum']
+IsolationLevelEnum = enum_type_wrapper.EnumTypeWrapper(_ISOLATIONLEVELENUM)
+EVENTTYPE_UNDEFINED = 0
+EVENTTYPE_CREATE = 1
+EVENTTYPE_UPDATE = 2
+EVENTTYPE_REMOVE = 3
+DEVICEDRIVER_UNDEFINED = 0
+DEVICEDRIVER_OPENCONFIG = 1
+DEVICEDRIVER_TRANSPORT_API = 2
+DEVICEDRIVER_P4 = 3
+DEVICEDRIVER_IETF_NETWORK_TOPOLOGY = 4
+DEVICEDRIVER_ONF_TR_532 = 5
+DEVICEDRIVER_XR = 6
+DEVICEDRIVER_IETF_L2VPN = 7
+DEVICEDRIVER_GNMI_OPENCONFIG = 8
+DEVICEDRIVER_OPTICAL_TFS = 9
+DEVICEDRIVER_IETF_ACTN = 10
+DEVICEDRIVER_OC = 11
+DEVICEDRIVER_QKD = 12
+DEVICEOPERATIONALSTATUS_UNDEFINED = 0
+DEVICEOPERATIONALSTATUS_DISABLED = 1
+DEVICEOPERATIONALSTATUS_ENABLED = 2
+SERVICETYPE_UNKNOWN = 0
+SERVICETYPE_L3NM = 1
+SERVICETYPE_L2NM = 2
+SERVICETYPE_TAPI_CONNECTIVITY_SERVICE = 3
+SERVICETYPE_TE = 4
+SERVICETYPE_E2E = 5
+SERVICETYPE_OPTICAL_CONNECTIVITY = 6
+SERVICETYPE_QKD = 7
+SERVICESTATUS_UNDEFINED = 0
+SERVICESTATUS_PLANNED = 1
+SERVICESTATUS_ACTIVE = 2
+SERVICESTATUS_UPDATING = 3
+SERVICESTATUS_PENDING_REMOVAL = 4
+SERVICESTATUS_SLA_VIOLATED = 5
+SLICESTATUS_UNDEFINED = 0
+SLICESTATUS_PLANNED = 1
+SLICESTATUS_INIT = 2
+SLICESTATUS_ACTIVE = 3
+SLICESTATUS_DEINIT = 4
+SLICESTATUS_SLA_VIOLATED = 5
+CONFIGACTION_UNDEFINED = 0
+CONFIGACTION_SET = 1
+CONFIGACTION_DELETE = 2
+CONSTRAINTACTION_UNDEFINED = 0
+CONSTRAINTACTION_SET = 1
+CONSTRAINTACTION_DELETE = 2
+NO_ISOLATION = 0
+PHYSICAL_ISOLATION = 1
+LOGICAL_ISOLATION = 2
+PROCESS_ISOLATION = 3
+PHYSICAL_MEMORY_ISOLATION = 4
+PHYSICAL_NETWORK_ISOLATION = 5
+VIRTUAL_RESOURCE_ISOLATION = 6
+NETWORK_FUNCTIONS_ISOLATION = 7
+SERVICE_ISOLATION = 8
+
+
+_EMPTY = DESCRIPTOR.message_types_by_name['Empty']
+_UUID = DESCRIPTOR.message_types_by_name['Uuid']
+_TIMESTAMP = DESCRIPTOR.message_types_by_name['Timestamp']
+_EVENT = DESCRIPTOR.message_types_by_name['Event']
+_CONTEXTID = DESCRIPTOR.message_types_by_name['ContextId']
+_CONTEXT = DESCRIPTOR.message_types_by_name['Context']
+_CONTEXTIDLIST = DESCRIPTOR.message_types_by_name['ContextIdList']
+_CONTEXTLIST = DESCRIPTOR.message_types_by_name['ContextList']
+_CONTEXTEVENT = DESCRIPTOR.message_types_by_name['ContextEvent']
+_TOPOLOGYID = DESCRIPTOR.message_types_by_name['TopologyId']
+_TOPOLOGY = DESCRIPTOR.message_types_by_name['Topology']
+_TOPOLOGYDETAILS = DESCRIPTOR.message_types_by_name['TopologyDetails']
+_TOPOLOGYIDLIST = DESCRIPTOR.message_types_by_name['TopologyIdList']
+_TOPOLOGYLIST = DESCRIPTOR.message_types_by_name['TopologyList']
+_TOPOLOGYEVENT = DESCRIPTOR.message_types_by_name['TopologyEvent']
+_DEVICEID = DESCRIPTOR.message_types_by_name['DeviceId']
+_DEVICE = DESCRIPTOR.message_types_by_name['Device']
+_COMPONENT = DESCRIPTOR.message_types_by_name['Component']
+_COMPONENT_ATTRIBUTESENTRY = _COMPONENT.nested_types_by_name['AttributesEntry']
+_DEVICECONFIG = DESCRIPTOR.message_types_by_name['DeviceConfig']
+_DEVICEIDLIST = DESCRIPTOR.message_types_by_name['DeviceIdList']
+_DEVICELIST = DESCRIPTOR.message_types_by_name['DeviceList']
+_DEVICEFILTER = DESCRIPTOR.message_types_by_name['DeviceFilter']
+_DEVICEEVENT = DESCRIPTOR.message_types_by_name['DeviceEvent']
+_LINKID = DESCRIPTOR.message_types_by_name['LinkId']
+_LINKATTRIBUTES = DESCRIPTOR.message_types_by_name['LinkAttributes']
+_LINK = DESCRIPTOR.message_types_by_name['Link']
+_LINKIDLIST = DESCRIPTOR.message_types_by_name['LinkIdList']
+_LINKLIST = DESCRIPTOR.message_types_by_name['LinkList']
+_LINKEVENT = DESCRIPTOR.message_types_by_name['LinkEvent']
+_SERVICEID = DESCRIPTOR.message_types_by_name['ServiceId']
+_SERVICE = DESCRIPTOR.message_types_by_name['Service']
+_SERVICESTATUS = DESCRIPTOR.message_types_by_name['ServiceStatus']
+_SERVICECONFIG = DESCRIPTOR.message_types_by_name['ServiceConfig']
+_SERVICEIDLIST = DESCRIPTOR.message_types_by_name['ServiceIdList']
+_SERVICELIST = DESCRIPTOR.message_types_by_name['ServiceList']
+_SERVICEFILTER = DESCRIPTOR.message_types_by_name['ServiceFilter']
+_SERVICEEVENT = DESCRIPTOR.message_types_by_name['ServiceEvent']
+_SLICEID = DESCRIPTOR.message_types_by_name['SliceId']
+_SLICE = DESCRIPTOR.message_types_by_name['Slice']
+_SLICEOWNER = DESCRIPTOR.message_types_by_name['SliceOwner']
+_SLICESTATUS = DESCRIPTOR.message_types_by_name['SliceStatus']
+_SLICECONFIG = DESCRIPTOR.message_types_by_name['SliceConfig']
+_SLICEIDLIST = DESCRIPTOR.message_types_by_name['SliceIdList']
+_SLICELIST = DESCRIPTOR.message_types_by_name['SliceList']
+_SLICEFILTER = DESCRIPTOR.message_types_by_name['SliceFilter']
+_SLICEEVENT = DESCRIPTOR.message_types_by_name['SliceEvent']
+_CONNECTIONID = DESCRIPTOR.message_types_by_name['ConnectionId']
+_CONNECTIONSETTINGS_L0 = DESCRIPTOR.message_types_by_name['ConnectionSettings_L0']
+_CONNECTIONSETTINGS_L2 = DESCRIPTOR.message_types_by_name['ConnectionSettings_L2']
+_CONNECTIONSETTINGS_L3 = DESCRIPTOR.message_types_by_name['ConnectionSettings_L3']
+_CONNECTIONSETTINGS_L4 = DESCRIPTOR.message_types_by_name['ConnectionSettings_L4']
+_CONNECTIONSETTINGS = DESCRIPTOR.message_types_by_name['ConnectionSettings']
+_CONNECTION = DESCRIPTOR.message_types_by_name['Connection']
+_CONNECTIONIDLIST = DESCRIPTOR.message_types_by_name['ConnectionIdList']
+_CONNECTIONLIST = DESCRIPTOR.message_types_by_name['ConnectionList']
+_CONNECTIONEVENT = DESCRIPTOR.message_types_by_name['ConnectionEvent']
+_ENDPOINTID = DESCRIPTOR.message_types_by_name['EndPointId']
+_ENDPOINT = DESCRIPTOR.message_types_by_name['EndPoint']
+_ENDPOINTNAME = DESCRIPTOR.message_types_by_name['EndPointName']
+_ENDPOINTIDLIST = DESCRIPTOR.message_types_by_name['EndPointIdList']
+_ENDPOINTNAMELIST = DESCRIPTOR.message_types_by_name['EndPointNameList']
+_CONFIGRULE_CUSTOM = DESCRIPTOR.message_types_by_name['ConfigRule_Custom']
+_CONFIGRULE_ACL = DESCRIPTOR.message_types_by_name['ConfigRule_ACL']
+_CONFIGRULE = DESCRIPTOR.message_types_by_name['ConfigRule']
+_CONSTRAINT_CUSTOM = DESCRIPTOR.message_types_by_name['Constraint_Custom']
+_CONSTRAINT_SCHEDULE = DESCRIPTOR.message_types_by_name['Constraint_Schedule']
+_GPS_POSITION = DESCRIPTOR.message_types_by_name['GPS_Position']
+_LOCATION = DESCRIPTOR.message_types_by_name['Location']
+_CONSTRAINT_ENDPOINTLOCATION = DESCRIPTOR.message_types_by_name['Constraint_EndPointLocation']
+_CONSTRAINT_ENDPOINTPRIORITY = DESCRIPTOR.message_types_by_name['Constraint_EndPointPriority']
+_CONSTRAINT_SLA_LATENCY = DESCRIPTOR.message_types_by_name['Constraint_SLA_Latency']
+_CONSTRAINT_SLA_CAPACITY = DESCRIPTOR.message_types_by_name['Constraint_SLA_Capacity']
+_CONSTRAINT_SLA_AVAILABILITY = DESCRIPTOR.message_types_by_name['Constraint_SLA_Availability']
+_CONSTRAINT_SLA_ISOLATION_LEVEL = DESCRIPTOR.message_types_by_name['Constraint_SLA_Isolation_level']
+_CONSTRAINT_EXCLUSIONS = DESCRIPTOR.message_types_by_name['Constraint_Exclusions']
+_CONSTRAINT = DESCRIPTOR.message_types_by_name['Constraint']
+_TERAFLOWCONTROLLER = DESCRIPTOR.message_types_by_name['TeraFlowController']
+_AUTHENTICATIONRESULT = DESCRIPTOR.message_types_by_name['AuthenticationResult']
+_OPTICALCONFIGID = DESCRIPTOR.message_types_by_name['OpticalConfigId']
+_OPTICALCONFIG = DESCRIPTOR.message_types_by_name['OpticalConfig']
+_OPTICALCONFIGLIST = DESCRIPTOR.message_types_by_name['OpticalConfigList']
+_OPTICALLINKID = DESCRIPTOR.message_types_by_name['OpticalLinkId']
+_FIBERID = DESCRIPTOR.message_types_by_name['FiberId']
+_FIBER = DESCRIPTOR.message_types_by_name['Fiber']
+_OPTICALLINKDETAILS = DESCRIPTOR.message_types_by_name['OpticalLinkDetails']
+_OPTICALLINK = DESCRIPTOR.message_types_by_name['OpticalLink']
+Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), {
+  'DESCRIPTOR' : _EMPTY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Empty)
+  })
+_sym_db.RegisterMessage(Empty)
+
+Uuid = _reflection.GeneratedProtocolMessageType('Uuid', (_message.Message,), {
+  'DESCRIPTOR' : _UUID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Uuid)
+  })
+_sym_db.RegisterMessage(Uuid)
+
+Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), {
+  'DESCRIPTOR' : _TIMESTAMP,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Timestamp)
+  })
+_sym_db.RegisterMessage(Timestamp)
+
+Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
+  'DESCRIPTOR' : _EVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Event)
+  })
+_sym_db.RegisterMessage(Event)
+
+ContextId = _reflection.GeneratedProtocolMessageType('ContextId', (_message.Message,), {
+  'DESCRIPTOR' : _CONTEXTID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ContextId)
+  })
+_sym_db.RegisterMessage(ContextId)
+
+Context = _reflection.GeneratedProtocolMessageType('Context', (_message.Message,), {
+  'DESCRIPTOR' : _CONTEXT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Context)
+  })
+_sym_db.RegisterMessage(Context)
+
+ContextIdList = _reflection.GeneratedProtocolMessageType('ContextIdList', (_message.Message,), {
+  'DESCRIPTOR' : _CONTEXTIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ContextIdList)
+  })
+_sym_db.RegisterMessage(ContextIdList)
+
+ContextList = _reflection.GeneratedProtocolMessageType('ContextList', (_message.Message,), {
+  'DESCRIPTOR' : _CONTEXTLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ContextList)
+  })
+_sym_db.RegisterMessage(ContextList)
+
+ContextEvent = _reflection.GeneratedProtocolMessageType('ContextEvent', (_message.Message,), {
+  'DESCRIPTOR' : _CONTEXTEVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ContextEvent)
+  })
+_sym_db.RegisterMessage(ContextEvent)
+
+TopologyId = _reflection.GeneratedProtocolMessageType('TopologyId', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGYID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TopologyId)
+  })
+_sym_db.RegisterMessage(TopologyId)
+
+Topology = _reflection.GeneratedProtocolMessageType('Topology', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Topology)
+  })
+_sym_db.RegisterMessage(Topology)
+
+TopologyDetails = _reflection.GeneratedProtocolMessageType('TopologyDetails', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGYDETAILS,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TopologyDetails)
+  })
+_sym_db.RegisterMessage(TopologyDetails)
+
+TopologyIdList = _reflection.GeneratedProtocolMessageType('TopologyIdList', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGYIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TopologyIdList)
+  })
+_sym_db.RegisterMessage(TopologyIdList)
+
+TopologyList = _reflection.GeneratedProtocolMessageType('TopologyList', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGYLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TopologyList)
+  })
+_sym_db.RegisterMessage(TopologyList)
+
+TopologyEvent = _reflection.GeneratedProtocolMessageType('TopologyEvent', (_message.Message,), {
+  'DESCRIPTOR' : _TOPOLOGYEVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TopologyEvent)
+  })
+_sym_db.RegisterMessage(TopologyEvent)
+
+DeviceId = _reflection.GeneratedProtocolMessageType('DeviceId', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICEID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceId)
+  })
+_sym_db.RegisterMessage(DeviceId)
+
+Device = _reflection.GeneratedProtocolMessageType('Device', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICE,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Device)
+  })
+_sym_db.RegisterMessage(Device)
+
+Component = _reflection.GeneratedProtocolMessageType('Component', (_message.Message,), {
+
+  'AttributesEntry' : _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), {
+    'DESCRIPTOR' : _COMPONENT_ATTRIBUTESENTRY,
+    '__module__' : 'context_pb2'
+    # @@protoc_insertion_point(class_scope:context.Component.AttributesEntry)
+    })
+  ,
+  'DESCRIPTOR' : _COMPONENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Component)
+  })
+_sym_db.RegisterMessage(Component)
+_sym_db.RegisterMessage(Component.AttributesEntry)
+
+DeviceConfig = _reflection.GeneratedProtocolMessageType('DeviceConfig', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICECONFIG,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceConfig)
+  })
+_sym_db.RegisterMessage(DeviceConfig)
+
+DeviceIdList = _reflection.GeneratedProtocolMessageType('DeviceIdList', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICEIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceIdList)
+  })
+_sym_db.RegisterMessage(DeviceIdList)
+
+DeviceList = _reflection.GeneratedProtocolMessageType('DeviceList', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICELIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceList)
+  })
+_sym_db.RegisterMessage(DeviceList)
+
+DeviceFilter = _reflection.GeneratedProtocolMessageType('DeviceFilter', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICEFILTER,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceFilter)
+  })
+_sym_db.RegisterMessage(DeviceFilter)
+
+DeviceEvent = _reflection.GeneratedProtocolMessageType('DeviceEvent', (_message.Message,), {
+  'DESCRIPTOR' : _DEVICEEVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.DeviceEvent)
+  })
+_sym_db.RegisterMessage(DeviceEvent)
+
+LinkId = _reflection.GeneratedProtocolMessageType('LinkId', (_message.Message,), {
+  'DESCRIPTOR' : _LINKID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.LinkId)
+  })
+_sym_db.RegisterMessage(LinkId)
+
+LinkAttributes = _reflection.GeneratedProtocolMessageType('LinkAttributes', (_message.Message,), {
+  'DESCRIPTOR' : _LINKATTRIBUTES,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.LinkAttributes)
+  })
+_sym_db.RegisterMessage(LinkAttributes)
+
+Link = _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), {
+  'DESCRIPTOR' : _LINK,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Link)
+  })
+_sym_db.RegisterMessage(Link)
+
+LinkIdList = _reflection.GeneratedProtocolMessageType('LinkIdList', (_message.Message,), {
+  'DESCRIPTOR' : _LINKIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.LinkIdList)
+  })
+_sym_db.RegisterMessage(LinkIdList)
+
+LinkList = _reflection.GeneratedProtocolMessageType('LinkList', (_message.Message,), {
+  'DESCRIPTOR' : _LINKLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.LinkList)
+  })
+_sym_db.RegisterMessage(LinkList)
+
+LinkEvent = _reflection.GeneratedProtocolMessageType('LinkEvent', (_message.Message,), {
+  'DESCRIPTOR' : _LINKEVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.LinkEvent)
+  })
+_sym_db.RegisterMessage(LinkEvent)
+
+ServiceId = _reflection.GeneratedProtocolMessageType('ServiceId', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICEID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ServiceId)
+  })
+_sym_db.RegisterMessage(ServiceId)
+
+Service = _reflection.GeneratedProtocolMessageType('Service', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICE,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Service)
+  })
+_sym_db.RegisterMessage(Service)
+
+ServiceStatus = _reflection.GeneratedProtocolMessageType('ServiceStatus', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICESTATUS,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ServiceStatus)
+  })
+_sym_db.RegisterMessage(ServiceStatus)
+
+ServiceConfig = _reflection.GeneratedProtocolMessageType('ServiceConfig', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICECONFIG,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ServiceConfig)
+  })
+_sym_db.RegisterMessage(ServiceConfig)
+
+ServiceIdList = _reflection.GeneratedProtocolMessageType('ServiceIdList', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICEIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ServiceIdList)
+  })
+_sym_db.RegisterMessage(ServiceIdList)
+
+ServiceList = _reflection.GeneratedProtocolMessageType('ServiceList', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICELIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ServiceList)
+  })
+_sym_db.RegisterMessage(ServiceList)
+
+ServiceFilter = _reflection.GeneratedProtocolMessageType('ServiceFilter', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICEFILTER,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ServiceFilter)
+  })
+_sym_db.RegisterMessage(ServiceFilter)
+
+ServiceEvent = _reflection.GeneratedProtocolMessageType('ServiceEvent', (_message.Message,), {
+  'DESCRIPTOR' : _SERVICEEVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ServiceEvent)
+  })
+_sym_db.RegisterMessage(ServiceEvent)
+
+SliceId = _reflection.GeneratedProtocolMessageType('SliceId', (_message.Message,), {
+  'DESCRIPTOR' : _SLICEID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceId)
+  })
+_sym_db.RegisterMessage(SliceId)
+
+Slice = _reflection.GeneratedProtocolMessageType('Slice', (_message.Message,), {
+  'DESCRIPTOR' : _SLICE,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Slice)
+  })
+_sym_db.RegisterMessage(Slice)
+
+SliceOwner = _reflection.GeneratedProtocolMessageType('SliceOwner', (_message.Message,), {
+  'DESCRIPTOR' : _SLICEOWNER,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceOwner)
+  })
+_sym_db.RegisterMessage(SliceOwner)
+
+SliceStatus = _reflection.GeneratedProtocolMessageType('SliceStatus', (_message.Message,), {
+  'DESCRIPTOR' : _SLICESTATUS,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceStatus)
+  })
+_sym_db.RegisterMessage(SliceStatus)
+
+SliceConfig = _reflection.GeneratedProtocolMessageType('SliceConfig', (_message.Message,), {
+  'DESCRIPTOR' : _SLICECONFIG,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceConfig)
+  })
+_sym_db.RegisterMessage(SliceConfig)
+
+SliceIdList = _reflection.GeneratedProtocolMessageType('SliceIdList', (_message.Message,), {
+  'DESCRIPTOR' : _SLICEIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceIdList)
+  })
+_sym_db.RegisterMessage(SliceIdList)
+
+SliceList = _reflection.GeneratedProtocolMessageType('SliceList', (_message.Message,), {
+  'DESCRIPTOR' : _SLICELIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceList)
+  })
+_sym_db.RegisterMessage(SliceList)
+
+SliceFilter = _reflection.GeneratedProtocolMessageType('SliceFilter', (_message.Message,), {
+  'DESCRIPTOR' : _SLICEFILTER,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceFilter)
+  })
+_sym_db.RegisterMessage(SliceFilter)
+
+SliceEvent = _reflection.GeneratedProtocolMessageType('SliceEvent', (_message.Message,), {
+  'DESCRIPTOR' : _SLICEEVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.SliceEvent)
+  })
+_sym_db.RegisterMessage(SliceEvent)
+
+ConnectionId = _reflection.GeneratedProtocolMessageType('ConnectionId', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionId)
+  })
+_sym_db.RegisterMessage(ConnectionId)
+
+ConnectionSettings_L0 = _reflection.GeneratedProtocolMessageType('ConnectionSettings_L0', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONSETTINGS_L0,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionSettings_L0)
+  })
+_sym_db.RegisterMessage(ConnectionSettings_L0)
+
+ConnectionSettings_L2 = _reflection.GeneratedProtocolMessageType('ConnectionSettings_L2', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONSETTINGS_L2,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionSettings_L2)
+  })
+_sym_db.RegisterMessage(ConnectionSettings_L2)
+
+ConnectionSettings_L3 = _reflection.GeneratedProtocolMessageType('ConnectionSettings_L3', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONSETTINGS_L3,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionSettings_L3)
+  })
+_sym_db.RegisterMessage(ConnectionSettings_L3)
+
+ConnectionSettings_L4 = _reflection.GeneratedProtocolMessageType('ConnectionSettings_L4', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONSETTINGS_L4,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionSettings_L4)
+  })
+_sym_db.RegisterMessage(ConnectionSettings_L4)
+
+ConnectionSettings = _reflection.GeneratedProtocolMessageType('ConnectionSettings', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONSETTINGS,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionSettings)
+  })
+_sym_db.RegisterMessage(ConnectionSettings)
+
+Connection = _reflection.GeneratedProtocolMessageType('Connection', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTION,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Connection)
+  })
+_sym_db.RegisterMessage(Connection)
+
+ConnectionIdList = _reflection.GeneratedProtocolMessageType('ConnectionIdList', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionIdList)
+  })
+_sym_db.RegisterMessage(ConnectionIdList)
+
+ConnectionList = _reflection.GeneratedProtocolMessageType('ConnectionList', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionList)
+  })
+_sym_db.RegisterMessage(ConnectionList)
+
+ConnectionEvent = _reflection.GeneratedProtocolMessageType('ConnectionEvent', (_message.Message,), {
+  'DESCRIPTOR' : _CONNECTIONEVENT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConnectionEvent)
+  })
+_sym_db.RegisterMessage(ConnectionEvent)
+
+EndPointId = _reflection.GeneratedProtocolMessageType('EndPointId', (_message.Message,), {
+  'DESCRIPTOR' : _ENDPOINTID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.EndPointId)
+  })
+_sym_db.RegisterMessage(EndPointId)
+
+EndPoint = _reflection.GeneratedProtocolMessageType('EndPoint', (_message.Message,), {
+  'DESCRIPTOR' : _ENDPOINT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.EndPoint)
+  })
+_sym_db.RegisterMessage(EndPoint)
+
+EndPointName = _reflection.GeneratedProtocolMessageType('EndPointName', (_message.Message,), {
+  'DESCRIPTOR' : _ENDPOINTNAME,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.EndPointName)
+  })
+_sym_db.RegisterMessage(EndPointName)
+
+EndPointIdList = _reflection.GeneratedProtocolMessageType('EndPointIdList', (_message.Message,), {
+  'DESCRIPTOR' : _ENDPOINTIDLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.EndPointIdList)
+  })
+_sym_db.RegisterMessage(EndPointIdList)
+
+EndPointNameList = _reflection.GeneratedProtocolMessageType('EndPointNameList', (_message.Message,), {
+  'DESCRIPTOR' : _ENDPOINTNAMELIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.EndPointNameList)
+  })
+_sym_db.RegisterMessage(EndPointNameList)
+
+ConfigRule_Custom = _reflection.GeneratedProtocolMessageType('ConfigRule_Custom', (_message.Message,), {
+  'DESCRIPTOR' : _CONFIGRULE_CUSTOM,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConfigRule_Custom)
+  })
+_sym_db.RegisterMessage(ConfigRule_Custom)
+
+ConfigRule_ACL = _reflection.GeneratedProtocolMessageType('ConfigRule_ACL', (_message.Message,), {
+  'DESCRIPTOR' : _CONFIGRULE_ACL,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConfigRule_ACL)
+  })
+_sym_db.RegisterMessage(ConfigRule_ACL)
+
+ConfigRule = _reflection.GeneratedProtocolMessageType('ConfigRule', (_message.Message,), {
+  'DESCRIPTOR' : _CONFIGRULE,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.ConfigRule)
+  })
+_sym_db.RegisterMessage(ConfigRule)
+
+Constraint_Custom = _reflection.GeneratedProtocolMessageType('Constraint_Custom', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_CUSTOM,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_Custom)
+  })
+_sym_db.RegisterMessage(Constraint_Custom)
+
+Constraint_Schedule = _reflection.GeneratedProtocolMessageType('Constraint_Schedule', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_SCHEDULE,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_Schedule)
+  })
+_sym_db.RegisterMessage(Constraint_Schedule)
+
+GPS_Position = _reflection.GeneratedProtocolMessageType('GPS_Position', (_message.Message,), {
+  'DESCRIPTOR' : _GPS_POSITION,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.GPS_Position)
+  })
+_sym_db.RegisterMessage(GPS_Position)
+
+Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), {
+  'DESCRIPTOR' : _LOCATION,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Location)
+  })
+_sym_db.RegisterMessage(Location)
+
+Constraint_EndPointLocation = _reflection.GeneratedProtocolMessageType('Constraint_EndPointLocation', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_ENDPOINTLOCATION,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_EndPointLocation)
+  })
+_sym_db.RegisterMessage(Constraint_EndPointLocation)
+
+Constraint_EndPointPriority = _reflection.GeneratedProtocolMessageType('Constraint_EndPointPriority', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_ENDPOINTPRIORITY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_EndPointPriority)
+  })
+_sym_db.RegisterMessage(Constraint_EndPointPriority)
+
+Constraint_SLA_Latency = _reflection.GeneratedProtocolMessageType('Constraint_SLA_Latency', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_SLA_LATENCY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_SLA_Latency)
+  })
+_sym_db.RegisterMessage(Constraint_SLA_Latency)
+
+Constraint_SLA_Capacity = _reflection.GeneratedProtocolMessageType('Constraint_SLA_Capacity', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_SLA_CAPACITY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_SLA_Capacity)
+  })
+_sym_db.RegisterMessage(Constraint_SLA_Capacity)
+
+Constraint_SLA_Availability = _reflection.GeneratedProtocolMessageType('Constraint_SLA_Availability', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_SLA_AVAILABILITY,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_SLA_Availability)
+  })
+_sym_db.RegisterMessage(Constraint_SLA_Availability)
+
+Constraint_SLA_Isolation_level = _reflection.GeneratedProtocolMessageType('Constraint_SLA_Isolation_level', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_SLA_ISOLATION_LEVEL,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_SLA_Isolation_level)
+  })
+_sym_db.RegisterMessage(Constraint_SLA_Isolation_level)
+
+Constraint_Exclusions = _reflection.GeneratedProtocolMessageType('Constraint_Exclusions', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT_EXCLUSIONS,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint_Exclusions)
+  })
+_sym_db.RegisterMessage(Constraint_Exclusions)
+
+Constraint = _reflection.GeneratedProtocolMessageType('Constraint', (_message.Message,), {
+  'DESCRIPTOR' : _CONSTRAINT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Constraint)
+  })
+_sym_db.RegisterMessage(Constraint)
+
+TeraFlowController = _reflection.GeneratedProtocolMessageType('TeraFlowController', (_message.Message,), {
+  'DESCRIPTOR' : _TERAFLOWCONTROLLER,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.TeraFlowController)
+  })
+_sym_db.RegisterMessage(TeraFlowController)
+
+AuthenticationResult = _reflection.GeneratedProtocolMessageType('AuthenticationResult', (_message.Message,), {
+  'DESCRIPTOR' : _AUTHENTICATIONRESULT,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.AuthenticationResult)
+  })
+_sym_db.RegisterMessage(AuthenticationResult)
+
+OpticalConfigId = _reflection.GeneratedProtocolMessageType('OpticalConfigId', (_message.Message,), {
+  'DESCRIPTOR' : _OPTICALCONFIGID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.OpticalConfigId)
+  })
+_sym_db.RegisterMessage(OpticalConfigId)
+
+OpticalConfig = _reflection.GeneratedProtocolMessageType('OpticalConfig', (_message.Message,), {
+  'DESCRIPTOR' : _OPTICALCONFIG,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.OpticalConfig)
+  })
+_sym_db.RegisterMessage(OpticalConfig)
+
+OpticalConfigList = _reflection.GeneratedProtocolMessageType('OpticalConfigList', (_message.Message,), {
+  'DESCRIPTOR' : _OPTICALCONFIGLIST,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.OpticalConfigList)
+  })
+_sym_db.RegisterMessage(OpticalConfigList)
+
+OpticalLinkId = _reflection.GeneratedProtocolMessageType('OpticalLinkId', (_message.Message,), {
+  'DESCRIPTOR' : _OPTICALLINKID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.OpticalLinkId)
+  })
+_sym_db.RegisterMessage(OpticalLinkId)
+
+FiberId = _reflection.GeneratedProtocolMessageType('FiberId', (_message.Message,), {
+  'DESCRIPTOR' : _FIBERID,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.FiberId)
+  })
+_sym_db.RegisterMessage(FiberId)
+
+Fiber = _reflection.GeneratedProtocolMessageType('Fiber', (_message.Message,), {
+  'DESCRIPTOR' : _FIBER,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.Fiber)
+  })
+_sym_db.RegisterMessage(Fiber)
+
+OpticalLinkDetails = _reflection.GeneratedProtocolMessageType('OpticalLinkDetails', (_message.Message,), {
+  'DESCRIPTOR' : _OPTICALLINKDETAILS,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.OpticalLinkDetails)
+  })
+_sym_db.RegisterMessage(OpticalLinkDetails)
+
+OpticalLink = _reflection.GeneratedProtocolMessageType('OpticalLink', (_message.Message,), {
+  'DESCRIPTOR' : _OPTICALLINK,
+  '__module__' : 'context_pb2'
+  # @@protoc_insertion_point(class_scope:context.OpticalLink)
+  })
+_sym_db.RegisterMessage(OpticalLink)
+
+_CONTEXTSERVICE = DESCRIPTOR.services_by_name['ContextService']
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  _COMPONENT_ATTRIBUTESENTRY._options = None
+  _COMPONENT_ATTRIBUTESENTRY._serialized_options = b'8\001'
+  _EVENTTYPEENUM._serialized_start=9328
+  _EVENTTYPEENUM._serialized_end=9434
+  _DEVICEDRIVERENUM._serialized_start=9437
+  _DEVICEDRIVERENUM._serialized_end=9819
+  _DEVICEOPERATIONALSTATUSENUM._serialized_start=9822
+  _DEVICEOPERATIONALSTATUSENUM._serialized_end=9965
+  _SERVICETYPEENUM._serialized_start=9968
+  _SERVICETYPEENUM._serialized_end=10197
+  _SERVICESTATUSENUM._serialized_start=10200
+  _SERVICESTATUSENUM._serialized_end=10396
+  _SLICESTATUSENUM._serialized_start=10399
+  _SLICESTATUSENUM._serialized_end=10568
+  _CONFIGACTIONENUM._serialized_start=10570
+  _CONFIGACTIONENUM._serialized_end=10663
+  _CONSTRAINTACTIONENUM._serialized_start=10665
+  _CONSTRAINTACTIONENUM._serialized_end=10774
+  _ISOLATIONLEVELENUM._serialized_start=10777
+  _ISOLATIONLEVELENUM._serialized_end=11036
+  _EMPTY._serialized_start=61
+  _EMPTY._serialized_end=68
+  _UUID._serialized_start=70
+  _UUID._serialized_end=90
+  _TIMESTAMP._serialized_start=92
+  _TIMESTAMP._serialized_end=122
+  _EVENT._serialized_start=124
+  _EVENT._serialized_end=214
+  _CONTEXTID._serialized_start=216
+  _CONTEXTID._serialized_end=264
+  _CONTEXT._serialized_start=267
+  _CONTEXT._serialized_end=500
+  _CONTEXTIDLIST._serialized_start=502
+  _CONTEXTIDLIST._serialized_end=558
+  _CONTEXTLIST._serialized_start=560
+  _CONTEXTLIST._serialized_end=609
+  _CONTEXTEVENT._serialized_start=611
+  _CONTEXTEVENT._serialized_end=696
+  _TOPOLOGYID._serialized_start=698
+  _TOPOLOGYID._serialized_end=788
+  _TOPOLOGY._serialized_start=791
+  _TOPOLOGY._serialized_end=931
+  _TOPOLOGYDETAILS._serialized_start=934
+  _TOPOLOGYDETAILS._serialized_end=1071
+  _TOPOLOGYIDLIST._serialized_start=1073
+  _TOPOLOGYIDLIST._serialized_end=1132
+  _TOPOLOGYLIST._serialized_start=1134
+  _TOPOLOGYLIST._serialized_end=1187
+  _TOPOLOGYEVENT._serialized_start=1189
+  _TOPOLOGYEVENT._serialized_end=1277
+  _DEVICEID._serialized_start=1279
+  _DEVICEID._serialized_end=1325
+  _DEVICE._serialized_start=1328
+  _DEVICE._serialized_end=1706
+  _COMPONENT._serialized_start=1709
+  _COMPONENT._serialized_end=1910
+  _COMPONENT_ATTRIBUTESENTRY._serialized_start=1861
+  _COMPONENT_ATTRIBUTESENTRY._serialized_end=1910
+  _DEVICECONFIG._serialized_start=1912
+  _DEVICECONFIG._serialized_end=1969
+  _DEVICEIDLIST._serialized_start=1971
+  _DEVICEIDLIST._serialized_end=2024
+  _DEVICELIST._serialized_start=2026
+  _DEVICELIST._serialized_end=2072
+  _DEVICEFILTER._serialized_start=2075
+  _DEVICEFILTER._serialized_end=2217
+  _DEVICEEVENT._serialized_start=2220
+  _DEVICEEVENT._serialized_end=2348
+  _LINKID._serialized_start=2350
+  _LINKID._serialized_end=2392
+  _LINKATTRIBUTES._serialized_start=2394
+  _LINKATTRIBUTES._serialized_end=2467
+  _LINK._serialized_start=2470
+  _LINK._serialized_end=2617
+  _LINKIDLIST._serialized_start=2619
+  _LINKIDLIST._serialized_end=2666
+  _LINKLIST._serialized_start=2668
+  _LINKLIST._serialized_end=2708
+  _LINKEVENT._serialized_start=2710
+  _LINKEVENT._serialized_end=2786
+  _SERVICEID._serialized_start=2788
+  _SERVICEID._serialized_end=2876
+  _SERVICE._serialized_start=2879
+  _SERVICE._serialized_end=3226
+  _SERVICESTATUS._serialized_start=3228
+  _SERVICESTATUS._serialized_end=3295
+  _SERVICECONFIG._serialized_start=3297
+  _SERVICECONFIG._serialized_end=3355
+  _SERVICEIDLIST._serialized_start=3357
+  _SERVICEIDLIST._serialized_end=3413
+  _SERVICELIST._serialized_start=3415
+  _SERVICELIST._serialized_end=3464
+  _SERVICEFILTER._serialized_start=3467
+  _SERVICEFILTER._serialized_end=3616
+  _SERVICEEVENT._serialized_start=3618
+  _SERVICEEVENT._serialized_end=3703
+  _SLICEID._serialized_start=3705
+  _SLICEID._serialized_end=3789
+  _SLICE._serialized_start=3792
+  _SLICE._serialized_end=4208
+  _SLICEOWNER._serialized_start=4210
+  _SLICEOWNER._serialized_end=4279
+  _SLICESTATUS._serialized_start=4281
+  _SLICESTATUS._serialized_end=4342
+  _SLICECONFIG._serialized_start=4344
+  _SLICECONFIG._serialized_end=4400
+  _SLICEIDLIST._serialized_start=4402
+  _SLICEIDLIST._serialized_end=4452
+  _SLICELIST._serialized_start=4454
+  _SLICELIST._serialized_end=4497
+  _SLICEFILTER._serialized_start=4500
+  _SLICEFILTER._serialized_end=4702
+  _SLICEEVENT._serialized_start=4704
+  _SLICEEVENT._serialized_end=4783
+  _CONNECTIONID._serialized_start=4785
+  _CONNECTIONID._serialized_end=4839
+  _CONNECTIONSETTINGS_L0._serialized_start=4841
+  _CONNECTIONSETTINGS_L0._serialized_end=4891
+  _CONNECTIONSETTINGS_L2._serialized_start=4894
+  _CONNECTIONSETTINGS_L2._serialized_end=5052
+  _CONNECTIONSETTINGS_L3._serialized_start=5054
+  _CONNECTIONSETTINGS_L3._serialized_end=5170
+  _CONNECTIONSETTINGS_L4._serialized_start=5172
+  _CONNECTIONSETTINGS_L4._serialized_end=5263
+  _CONNECTIONSETTINGS._serialized_start=5266
+  _CONNECTIONSETTINGS._serialized_end=5462
+  _CONNECTION._serialized_start=5465
+  _CONNECTION._serialized_end=5708
+  _CONNECTIONIDLIST._serialized_start=5710
+  _CONNECTIONIDLIST._serialized_end=5775
+  _CONNECTIONLIST._serialized_start=5777
+  _CONNECTIONLIST._serialized_end=5835
+  _CONNECTIONEVENT._serialized_start=5837
+  _CONNECTIONEVENT._serialized_end=5931
+  _ENDPOINTID._serialized_start=5934
+  _ENDPOINTID._serialized_end=6064
+  _ENDPOINT._serialized_start=6067
+  _ENDPOINT._serialized_end=6261
+  _ENDPOINTNAME._serialized_start=6263
+  _ENDPOINTNAME._serialized_end=6386
+  _ENDPOINTIDLIST._serialized_start=6388
+  _ENDPOINTIDLIST._serialized_end=6447
+  _ENDPOINTNAMELIST._serialized_start=6449
+  _ENDPOINTNAMELIST._serialized_end=6514
+  _CONFIGRULE_CUSTOM._serialized_start=6516
+  _CONFIGRULE_CUSTOM._serialized_end=6581
+  _CONFIGRULE_ACL._serialized_start=6583
+  _CONFIGRULE_ACL._serialized_end=6676
+  _CONFIGRULE._serialized_start=6679
+  _CONFIGRULE._serialized_end=6835
+  _CONSTRAINT_CUSTOM._serialized_start=6837
+  _CONSTRAINT_CUSTOM._serialized_end=6907
+  _CONSTRAINT_SCHEDULE._serialized_start=6909
+  _CONSTRAINT_SCHEDULE._serialized_end=6978
+  _GPS_POSITION._serialized_start=6980
+  _GPS_POSITION._serialized_end=7031
+  _LOCATION._serialized_start=7033
+  _LOCATION._serialized_end=7120
+  _CONSTRAINT_ENDPOINTLOCATION._serialized_start=7122
+  _CONSTRAINT_ENDPOINTLOCATION._serialized_end=7230
+  _CONSTRAINT_ENDPOINTPRIORITY._serialized_start=7232
+  _CONSTRAINT_ENDPOINTPRIORITY._serialized_end=7321
+  _CONSTRAINT_SLA_LATENCY._serialized_start=7323
+  _CONSTRAINT_SLA_LATENCY._serialized_end=7371
+  _CONSTRAINT_SLA_CAPACITY._serialized_start=7373
+  _CONSTRAINT_SLA_CAPACITY._serialized_end=7421
+  _CONSTRAINT_SLA_AVAILABILITY._serialized_start=7423
+  _CONSTRAINT_SLA_AVAILABILITY._serialized_end=7522
+  _CONSTRAINT_SLA_ISOLATION_LEVEL._serialized_start=7524
+  _CONSTRAINT_SLA_ISOLATION_LEVEL._serialized_end=7610
+  _CONSTRAINT_EXCLUSIONS._serialized_start=7613
+  _CONSTRAINT_EXCLUSIONS._serialized_end=7775
+  _CONSTRAINT._serialized_start=7778
+  _CONSTRAINT._serialized_end=8381
+  _TERAFLOWCONTROLLER._serialized_start=8383
+  _TERAFLOWCONTROLLER._serialized_end=8477
+  _AUTHENTICATIONRESULT._serialized_start=8479
+  _AUTHENTICATIONRESULT._serialized_end=8564
+  _OPTICALCONFIGID._serialized_start=8566
+  _OPTICALCONFIGID._serialized_end=8611
+  _OPTICALCONFIG._serialized_start=8613
+  _OPTICALCONFIG._serialized_end=8696
+  _OPTICALCONFIGLIST._serialized_start=8698
+  _OPTICALCONFIGLIST._serialized_end=8765
+  _OPTICALLINKID._serialized_start=8767
+  _OPTICALLINKID._serialized_end=8824
+  _FIBERID._serialized_start=8826
+  _FIBERID._serialized_end=8870
+  _FIBER._serialized_start=8873
+  _FIBER._serialized_end=9098
+  _OPTICALLINKDETAILS._serialized_start=9100
+  _OPTICALLINKDETAILS._serialized_end=9200
+  _OPTICALLINK._serialized_start=9202
+  _OPTICALLINK._serialized_end=9326
+  _CONTEXTSERVICE._serialized_start=11039
+  _CONTEXTSERVICE._serialized_end=14277
+# @@protoc_insertion_point(module_scope)
diff --git a/src/device/tests/qkd/unit/generated/context_pb2_grpc.py b/src/device/tests/qkd/unit/generated/context_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed6e153bae63087427556757d23afec3ee595561
--- /dev/null
+++ b/src/device/tests/qkd/unit/generated/context_pb2_grpc.py
@@ -0,0 +1,1849 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+
+import context_pb2 as context__pb2
+
+
+class ContextServiceStub(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.ListContextIds = channel.unary_unary(
+                '/context.ContextService/ListContextIds',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.ContextIdList.FromString,
+                )
+        self.ListContexts = channel.unary_unary(
+                '/context.ContextService/ListContexts',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.ContextList.FromString,
+                )
+        self.GetContext = channel.unary_unary(
+                '/context.ContextService/GetContext',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.Context.FromString,
+                )
+        self.SetContext = channel.unary_unary(
+                '/context.ContextService/SetContext',
+                request_serializer=context__pb2.Context.SerializeToString,
+                response_deserializer=context__pb2.ContextId.FromString,
+                )
+        self.RemoveContext = channel.unary_unary(
+                '/context.ContextService/RemoveContext',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetContextEvents = channel.unary_stream(
+                '/context.ContextService/GetContextEvents',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.ContextEvent.FromString,
+                )
+        self.ListTopologyIds = channel.unary_unary(
+                '/context.ContextService/ListTopologyIds',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.TopologyIdList.FromString,
+                )
+        self.ListTopologies = channel.unary_unary(
+                '/context.ContextService/ListTopologies',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.TopologyList.FromString,
+                )
+        self.GetTopology = channel.unary_unary(
+                '/context.ContextService/GetTopology',
+                request_serializer=context__pb2.TopologyId.SerializeToString,
+                response_deserializer=context__pb2.Topology.FromString,
+                )
+        self.GetTopologyDetails = channel.unary_unary(
+                '/context.ContextService/GetTopologyDetails',
+                request_serializer=context__pb2.TopologyId.SerializeToString,
+                response_deserializer=context__pb2.TopologyDetails.FromString,
+                )
+        self.SetTopology = channel.unary_unary(
+                '/context.ContextService/SetTopology',
+                request_serializer=context__pb2.Topology.SerializeToString,
+                response_deserializer=context__pb2.TopologyId.FromString,
+                )
+        self.RemoveTopology = channel.unary_unary(
+                '/context.ContextService/RemoveTopology',
+                request_serializer=context__pb2.TopologyId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetTopologyEvents = channel.unary_stream(
+                '/context.ContextService/GetTopologyEvents',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.TopologyEvent.FromString,
+                )
+        self.ListDeviceIds = channel.unary_unary(
+                '/context.ContextService/ListDeviceIds',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.DeviceIdList.FromString,
+                )
+        self.ListDevices = channel.unary_unary(
+                '/context.ContextService/ListDevices',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.DeviceList.FromString,
+                )
+        self.GetDevice = channel.unary_unary(
+                '/context.ContextService/GetDevice',
+                request_serializer=context__pb2.DeviceId.SerializeToString,
+                response_deserializer=context__pb2.Device.FromString,
+                )
+        self.SetDevice = channel.unary_unary(
+                '/context.ContextService/SetDevice',
+                request_serializer=context__pb2.Device.SerializeToString,
+                response_deserializer=context__pb2.DeviceId.FromString,
+                )
+        self.RemoveDevice = channel.unary_unary(
+                '/context.ContextService/RemoveDevice',
+                request_serializer=context__pb2.DeviceId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetDeviceEvents = channel.unary_stream(
+                '/context.ContextService/GetDeviceEvents',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.DeviceEvent.FromString,
+                )
+        self.SelectDevice = channel.unary_unary(
+                '/context.ContextService/SelectDevice',
+                request_serializer=context__pb2.DeviceFilter.SerializeToString,
+                response_deserializer=context__pb2.DeviceList.FromString,
+                )
+        self.ListEndPointNames = channel.unary_unary(
+                '/context.ContextService/ListEndPointNames',
+                request_serializer=context__pb2.EndPointIdList.SerializeToString,
+                response_deserializer=context__pb2.EndPointNameList.FromString,
+                )
+        self.ListLinkIds = channel.unary_unary(
+                '/context.ContextService/ListLinkIds',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.LinkIdList.FromString,
+                )
+        self.ListLinks = channel.unary_unary(
+                '/context.ContextService/ListLinks',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.LinkList.FromString,
+                )
+        self.GetLink = channel.unary_unary(
+                '/context.ContextService/GetLink',
+                request_serializer=context__pb2.LinkId.SerializeToString,
+                response_deserializer=context__pb2.Link.FromString,
+                )
+        self.SetLink = channel.unary_unary(
+                '/context.ContextService/SetLink',
+                request_serializer=context__pb2.Link.SerializeToString,
+                response_deserializer=context__pb2.LinkId.FromString,
+                )
+        self.RemoveLink = channel.unary_unary(
+                '/context.ContextService/RemoveLink',
+                request_serializer=context__pb2.LinkId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetLinkEvents = channel.unary_stream(
+                '/context.ContextService/GetLinkEvents',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.LinkEvent.FromString,
+                )
+        self.ListServiceIds = channel.unary_unary(
+                '/context.ContextService/ListServiceIds',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.ServiceIdList.FromString,
+                )
+        self.ListServices = channel.unary_unary(
+                '/context.ContextService/ListServices',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.ServiceList.FromString,
+                )
+        self.GetService = channel.unary_unary(
+                '/context.ContextService/GetService',
+                request_serializer=context__pb2.ServiceId.SerializeToString,
+                response_deserializer=context__pb2.Service.FromString,
+                )
+        self.SetService = channel.unary_unary(
+                '/context.ContextService/SetService',
+                request_serializer=context__pb2.Service.SerializeToString,
+                response_deserializer=context__pb2.ServiceId.FromString,
+                )
+        self.UnsetService = channel.unary_unary(
+                '/context.ContextService/UnsetService',
+                request_serializer=context__pb2.Service.SerializeToString,
+                response_deserializer=context__pb2.ServiceId.FromString,
+                )
+        self.RemoveService = channel.unary_unary(
+                '/context.ContextService/RemoveService',
+                request_serializer=context__pb2.ServiceId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetServiceEvents = channel.unary_stream(
+                '/context.ContextService/GetServiceEvents',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.ServiceEvent.FromString,
+                )
+        self.SelectService = channel.unary_unary(
+                '/context.ContextService/SelectService',
+                request_serializer=context__pb2.ServiceFilter.SerializeToString,
+                response_deserializer=context__pb2.ServiceList.FromString,
+                )
+        self.ListSliceIds = channel.unary_unary(
+                '/context.ContextService/ListSliceIds',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.SliceIdList.FromString,
+                )
+        self.ListSlices = channel.unary_unary(
+                '/context.ContextService/ListSlices',
+                request_serializer=context__pb2.ContextId.SerializeToString,
+                response_deserializer=context__pb2.SliceList.FromString,
+                )
+        self.GetSlice = channel.unary_unary(
+                '/context.ContextService/GetSlice',
+                request_serializer=context__pb2.SliceId.SerializeToString,
+                response_deserializer=context__pb2.Slice.FromString,
+                )
+        self.SetSlice = channel.unary_unary(
+                '/context.ContextService/SetSlice',
+                request_serializer=context__pb2.Slice.SerializeToString,
+                response_deserializer=context__pb2.SliceId.FromString,
+                )
+        self.UnsetSlice = channel.unary_unary(
+                '/context.ContextService/UnsetSlice',
+                request_serializer=context__pb2.Slice.SerializeToString,
+                response_deserializer=context__pb2.SliceId.FromString,
+                )
+        self.RemoveSlice = channel.unary_unary(
+                '/context.ContextService/RemoveSlice',
+                request_serializer=context__pb2.SliceId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetSliceEvents = channel.unary_stream(
+                '/context.ContextService/GetSliceEvents',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.SliceEvent.FromString,
+                )
+        self.SelectSlice = channel.unary_unary(
+                '/context.ContextService/SelectSlice',
+                request_serializer=context__pb2.SliceFilter.SerializeToString,
+                response_deserializer=context__pb2.SliceList.FromString,
+                )
+        self.ListConnectionIds = channel.unary_unary(
+                '/context.ContextService/ListConnectionIds',
+                request_serializer=context__pb2.ServiceId.SerializeToString,
+                response_deserializer=context__pb2.ConnectionIdList.FromString,
+                )
+        self.ListConnections = channel.unary_unary(
+                '/context.ContextService/ListConnections',
+                request_serializer=context__pb2.ServiceId.SerializeToString,
+                response_deserializer=context__pb2.ConnectionList.FromString,
+                )
+        self.GetConnection = channel.unary_unary(
+                '/context.ContextService/GetConnection',
+                request_serializer=context__pb2.ConnectionId.SerializeToString,
+                response_deserializer=context__pb2.Connection.FromString,
+                )
+        self.SetConnection = channel.unary_unary(
+                '/context.ContextService/SetConnection',
+                request_serializer=context__pb2.Connection.SerializeToString,
+                response_deserializer=context__pb2.ConnectionId.FromString,
+                )
+        self.RemoveConnection = channel.unary_unary(
+                '/context.ContextService/RemoveConnection',
+                request_serializer=context__pb2.ConnectionId.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetConnectionEvents = channel.unary_stream(
+                '/context.ContextService/GetConnectionEvents',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.ConnectionEvent.FromString,
+                )
+        self.GetOpticalConfig = channel.unary_unary(
+                '/context.ContextService/GetOpticalConfig',
+                request_serializer=context__pb2.Empty.SerializeToString,
+                response_deserializer=context__pb2.OpticalConfigList.FromString,
+                )
+        self.SetOpticalConfig = channel.unary_unary(
+                '/context.ContextService/SetOpticalConfig',
+                request_serializer=context__pb2.OpticalConfig.SerializeToString,
+                response_deserializer=context__pb2.OpticalConfigId.FromString,
+                )
+        self.SelectOpticalConfig = channel.unary_unary(
+                '/context.ContextService/SelectOpticalConfig',
+                request_serializer=context__pb2.OpticalConfigId.SerializeToString,
+                response_deserializer=context__pb2.OpticalConfig.FromString,
+                )
+        self.SetOpticalLink = channel.unary_unary(
+                '/context.ContextService/SetOpticalLink',
+                request_serializer=context__pb2.OpticalLink.SerializeToString,
+                response_deserializer=context__pb2.Empty.FromString,
+                )
+        self.GetOpticalLink = channel.unary_unary(
+                '/context.ContextService/GetOpticalLink',
+                request_serializer=context__pb2.OpticalLinkId.SerializeToString,
+                response_deserializer=context__pb2.OpticalLink.FromString,
+                )
+        self.GetFiber = channel.unary_unary(
+                '/context.ContextService/GetFiber',
+                request_serializer=context__pb2.FiberId.SerializeToString,
+                response_deserializer=context__pb2.Fiber.FromString,
+                )
+
+
+class ContextServiceServicer(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def ListContextIds(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListContexts(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetContext(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetContext(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def RemoveContext(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetContextEvents(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListTopologyIds(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListTopologies(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetTopology(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetTopologyDetails(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetTopology(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def RemoveTopology(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetTopologyEvents(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListDeviceIds(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListDevices(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetDevice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetDevice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def RemoveDevice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetDeviceEvents(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SelectDevice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListEndPointNames(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListLinkIds(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListLinks(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetLink(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetLink(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def RemoveLink(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetLinkEvents(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListServiceIds(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListServices(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetService(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetService(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def UnsetService(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def RemoveService(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetServiceEvents(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SelectService(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListSliceIds(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListSlices(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetSlice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetSlice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def UnsetSlice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def RemoveSlice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetSliceEvents(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SelectSlice(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListConnectionIds(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def ListConnections(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetConnection(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetConnection(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def RemoveConnection(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetConnectionEvents(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetOpticalConfig(self, request, context):
+        """------------------------------ Experimental -----------------------------
+        """
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetOpticalConfig(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SelectOpticalConfig(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SetOpticalLink(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetOpticalLink(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetFiber(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+
+def add_ContextServiceServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+            'ListContextIds': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListContextIds,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.ContextIdList.SerializeToString,
+            ),
+            'ListContexts': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListContexts,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.ContextList.SerializeToString,
+            ),
+            'GetContext': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetContext,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.Context.SerializeToString,
+            ),
+            'SetContext': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetContext,
+                    request_deserializer=context__pb2.Context.FromString,
+                    response_serializer=context__pb2.ContextId.SerializeToString,
+            ),
+            'RemoveContext': grpc.unary_unary_rpc_method_handler(
+                    servicer.RemoveContext,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetContextEvents': grpc.unary_stream_rpc_method_handler(
+                    servicer.GetContextEvents,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.ContextEvent.SerializeToString,
+            ),
+            'ListTopologyIds': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListTopologyIds,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.TopologyIdList.SerializeToString,
+            ),
+            'ListTopologies': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListTopologies,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.TopologyList.SerializeToString,
+            ),
+            'GetTopology': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetTopology,
+                    request_deserializer=context__pb2.TopologyId.FromString,
+                    response_serializer=context__pb2.Topology.SerializeToString,
+            ),
+            'GetTopologyDetails': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetTopologyDetails,
+                    request_deserializer=context__pb2.TopologyId.FromString,
+                    response_serializer=context__pb2.TopologyDetails.SerializeToString,
+            ),
+            'SetTopology': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetTopology,
+                    request_deserializer=context__pb2.Topology.FromString,
+                    response_serializer=context__pb2.TopologyId.SerializeToString,
+            ),
+            'RemoveTopology': grpc.unary_unary_rpc_method_handler(
+                    servicer.RemoveTopology,
+                    request_deserializer=context__pb2.TopologyId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetTopologyEvents': grpc.unary_stream_rpc_method_handler(
+                    servicer.GetTopologyEvents,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.TopologyEvent.SerializeToString,
+            ),
+            'ListDeviceIds': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListDeviceIds,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.DeviceIdList.SerializeToString,
+            ),
+            'ListDevices': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListDevices,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.DeviceList.SerializeToString,
+            ),
+            'GetDevice': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetDevice,
+                    request_deserializer=context__pb2.DeviceId.FromString,
+                    response_serializer=context__pb2.Device.SerializeToString,
+            ),
+            'SetDevice': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetDevice,
+                    request_deserializer=context__pb2.Device.FromString,
+                    response_serializer=context__pb2.DeviceId.SerializeToString,
+            ),
+            'RemoveDevice': grpc.unary_unary_rpc_method_handler(
+                    servicer.RemoveDevice,
+                    request_deserializer=context__pb2.DeviceId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetDeviceEvents': grpc.unary_stream_rpc_method_handler(
+                    servicer.GetDeviceEvents,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.DeviceEvent.SerializeToString,
+            ),
+            'SelectDevice': grpc.unary_unary_rpc_method_handler(
+                    servicer.SelectDevice,
+                    request_deserializer=context__pb2.DeviceFilter.FromString,
+                    response_serializer=context__pb2.DeviceList.SerializeToString,
+            ),
+            'ListEndPointNames': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListEndPointNames,
+                    request_deserializer=context__pb2.EndPointIdList.FromString,
+                    response_serializer=context__pb2.EndPointNameList.SerializeToString,
+            ),
+            'ListLinkIds': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListLinkIds,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.LinkIdList.SerializeToString,
+            ),
+            'ListLinks': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListLinks,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.LinkList.SerializeToString,
+            ),
+            'GetLink': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetLink,
+                    request_deserializer=context__pb2.LinkId.FromString,
+                    response_serializer=context__pb2.Link.SerializeToString,
+            ),
+            'SetLink': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetLink,
+                    request_deserializer=context__pb2.Link.FromString,
+                    response_serializer=context__pb2.LinkId.SerializeToString,
+            ),
+            'RemoveLink': grpc.unary_unary_rpc_method_handler(
+                    servicer.RemoveLink,
+                    request_deserializer=context__pb2.LinkId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetLinkEvents': grpc.unary_stream_rpc_method_handler(
+                    servicer.GetLinkEvents,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.LinkEvent.SerializeToString,
+            ),
+            'ListServiceIds': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListServiceIds,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.ServiceIdList.SerializeToString,
+            ),
+            'ListServices': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListServices,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.ServiceList.SerializeToString,
+            ),
+            'GetService': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetService,
+                    request_deserializer=context__pb2.ServiceId.FromString,
+                    response_serializer=context__pb2.Service.SerializeToString,
+            ),
+            'SetService': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetService,
+                    request_deserializer=context__pb2.Service.FromString,
+                    response_serializer=context__pb2.ServiceId.SerializeToString,
+            ),
+            'UnsetService': grpc.unary_unary_rpc_method_handler(
+                    servicer.UnsetService,
+                    request_deserializer=context__pb2.Service.FromString,
+                    response_serializer=context__pb2.ServiceId.SerializeToString,
+            ),
+            'RemoveService': grpc.unary_unary_rpc_method_handler(
+                    servicer.RemoveService,
+                    request_deserializer=context__pb2.ServiceId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetServiceEvents': grpc.unary_stream_rpc_method_handler(
+                    servicer.GetServiceEvents,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.ServiceEvent.SerializeToString,
+            ),
+            'SelectService': grpc.unary_unary_rpc_method_handler(
+                    servicer.SelectService,
+                    request_deserializer=context__pb2.ServiceFilter.FromString,
+                    response_serializer=context__pb2.ServiceList.SerializeToString,
+            ),
+            'ListSliceIds': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListSliceIds,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.SliceIdList.SerializeToString,
+            ),
+            'ListSlices': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListSlices,
+                    request_deserializer=context__pb2.ContextId.FromString,
+                    response_serializer=context__pb2.SliceList.SerializeToString,
+            ),
+            'GetSlice': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetSlice,
+                    request_deserializer=context__pb2.SliceId.FromString,
+                    response_serializer=context__pb2.Slice.SerializeToString,
+            ),
+            'SetSlice': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetSlice,
+                    request_deserializer=context__pb2.Slice.FromString,
+                    response_serializer=context__pb2.SliceId.SerializeToString,
+            ),
+            'UnsetSlice': grpc.unary_unary_rpc_method_handler(
+                    servicer.UnsetSlice,
+                    request_deserializer=context__pb2.Slice.FromString,
+                    response_serializer=context__pb2.SliceId.SerializeToString,
+            ),
+            'RemoveSlice': grpc.unary_unary_rpc_method_handler(
+                    servicer.RemoveSlice,
+                    request_deserializer=context__pb2.SliceId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetSliceEvents': grpc.unary_stream_rpc_method_handler(
+                    servicer.GetSliceEvents,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.SliceEvent.SerializeToString,
+            ),
+            'SelectSlice': grpc.unary_unary_rpc_method_handler(
+                    servicer.SelectSlice,
+                    request_deserializer=context__pb2.SliceFilter.FromString,
+                    response_serializer=context__pb2.SliceList.SerializeToString,
+            ),
+            'ListConnectionIds': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListConnectionIds,
+                    request_deserializer=context__pb2.ServiceId.FromString,
+                    response_serializer=context__pb2.ConnectionIdList.SerializeToString,
+            ),
+            'ListConnections': grpc.unary_unary_rpc_method_handler(
+                    servicer.ListConnections,
+                    request_deserializer=context__pb2.ServiceId.FromString,
+                    response_serializer=context__pb2.ConnectionList.SerializeToString,
+            ),
+            'GetConnection': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetConnection,
+                    request_deserializer=context__pb2.ConnectionId.FromString,
+                    response_serializer=context__pb2.Connection.SerializeToString,
+            ),
+            'SetConnection': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetConnection,
+                    request_deserializer=context__pb2.Connection.FromString,
+                    response_serializer=context__pb2.ConnectionId.SerializeToString,
+            ),
+            'RemoveConnection': grpc.unary_unary_rpc_method_handler(
+                    servicer.RemoveConnection,
+                    request_deserializer=context__pb2.ConnectionId.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetConnectionEvents': grpc.unary_stream_rpc_method_handler(
+                    servicer.GetConnectionEvents,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.ConnectionEvent.SerializeToString,
+            ),
+            'GetOpticalConfig': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetOpticalConfig,
+                    request_deserializer=context__pb2.Empty.FromString,
+                    response_serializer=context__pb2.OpticalConfigList.SerializeToString,
+            ),
+            'SetOpticalConfig': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetOpticalConfig,
+                    request_deserializer=context__pb2.OpticalConfig.FromString,
+                    response_serializer=context__pb2.OpticalConfigId.SerializeToString,
+            ),
+            'SelectOpticalConfig': grpc.unary_unary_rpc_method_handler(
+                    servicer.SelectOpticalConfig,
+                    request_deserializer=context__pb2.OpticalConfigId.FromString,
+                    response_serializer=context__pb2.OpticalConfig.SerializeToString,
+            ),
+            'SetOpticalLink': grpc.unary_unary_rpc_method_handler(
+                    servicer.SetOpticalLink,
+                    request_deserializer=context__pb2.OpticalLink.FromString,
+                    response_serializer=context__pb2.Empty.SerializeToString,
+            ),
+            'GetOpticalLink': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetOpticalLink,
+                    request_deserializer=context__pb2.OpticalLinkId.FromString,
+                    response_serializer=context__pb2.OpticalLink.SerializeToString,
+            ),
+            'GetFiber': grpc.unary_unary_rpc_method_handler(
+                    servicer.GetFiber,
+                    request_deserializer=context__pb2.FiberId.FromString,
+                    response_serializer=context__pb2.Fiber.SerializeToString,
+            ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+            'context.ContextService', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class ContextService(object):
+    """Missing associated documentation comment in .proto file."""
+
+    @staticmethod
+    def ListContextIds(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListContextIds',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.ContextIdList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListContexts(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListContexts',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.ContextList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetContext(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetContext',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.Context.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetContext(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetContext',
+            context__pb2.Context.SerializeToString,
+            context__pb2.ContextId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def RemoveContext(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/RemoveContext',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetContextEvents(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_stream(request, target, '/context.ContextService/GetContextEvents',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.ContextEvent.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListTopologyIds(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListTopologyIds',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.TopologyIdList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListTopologies(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListTopologies',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.TopologyList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetTopology(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetTopology',
+            context__pb2.TopologyId.SerializeToString,
+            context__pb2.Topology.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetTopologyDetails(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetTopologyDetails',
+            context__pb2.TopologyId.SerializeToString,
+            context__pb2.TopologyDetails.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetTopology(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetTopology',
+            context__pb2.Topology.SerializeToString,
+            context__pb2.TopologyId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def RemoveTopology(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/RemoveTopology',
+            context__pb2.TopologyId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetTopologyEvents(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_stream(request, target, '/context.ContextService/GetTopologyEvents',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.TopologyEvent.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListDeviceIds(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListDeviceIds',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.DeviceIdList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListDevices(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListDevices',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.DeviceList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetDevice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetDevice',
+            context__pb2.DeviceId.SerializeToString,
+            context__pb2.Device.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetDevice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetDevice',
+            context__pb2.Device.SerializeToString,
+            context__pb2.DeviceId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def RemoveDevice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/RemoveDevice',
+            context__pb2.DeviceId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetDeviceEvents(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_stream(request, target, '/context.ContextService/GetDeviceEvents',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.DeviceEvent.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SelectDevice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SelectDevice',
+            context__pb2.DeviceFilter.SerializeToString,
+            context__pb2.DeviceList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListEndPointNames(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListEndPointNames',
+            context__pb2.EndPointIdList.SerializeToString,
+            context__pb2.EndPointNameList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListLinkIds(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListLinkIds',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.LinkIdList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListLinks(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListLinks',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.LinkList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetLink(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetLink',
+            context__pb2.LinkId.SerializeToString,
+            context__pb2.Link.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetLink(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetLink',
+            context__pb2.Link.SerializeToString,
+            context__pb2.LinkId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def RemoveLink(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/RemoveLink',
+            context__pb2.LinkId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetLinkEvents(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_stream(request, target, '/context.ContextService/GetLinkEvents',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.LinkEvent.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListServiceIds(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListServiceIds',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.ServiceIdList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListServices(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListServices',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.ServiceList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetService(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetService',
+            context__pb2.ServiceId.SerializeToString,
+            context__pb2.Service.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetService(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetService',
+            context__pb2.Service.SerializeToString,
+            context__pb2.ServiceId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def UnsetService(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/UnsetService',
+            context__pb2.Service.SerializeToString,
+            context__pb2.ServiceId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def RemoveService(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/RemoveService',
+            context__pb2.ServiceId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetServiceEvents(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_stream(request, target, '/context.ContextService/GetServiceEvents',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.ServiceEvent.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SelectService(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SelectService',
+            context__pb2.ServiceFilter.SerializeToString,
+            context__pb2.ServiceList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListSliceIds(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListSliceIds',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.SliceIdList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListSlices(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListSlices',
+            context__pb2.ContextId.SerializeToString,
+            context__pb2.SliceList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetSlice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetSlice',
+            context__pb2.SliceId.SerializeToString,
+            context__pb2.Slice.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetSlice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetSlice',
+            context__pb2.Slice.SerializeToString,
+            context__pb2.SliceId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def UnsetSlice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/UnsetSlice',
+            context__pb2.Slice.SerializeToString,
+            context__pb2.SliceId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def RemoveSlice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/RemoveSlice',
+            context__pb2.SliceId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetSliceEvents(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_stream(request, target, '/context.ContextService/GetSliceEvents',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.SliceEvent.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SelectSlice(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SelectSlice',
+            context__pb2.SliceFilter.SerializeToString,
+            context__pb2.SliceList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListConnectionIds(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListConnectionIds',
+            context__pb2.ServiceId.SerializeToString,
+            context__pb2.ConnectionIdList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ListConnections(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/ListConnections',
+            context__pb2.ServiceId.SerializeToString,
+            context__pb2.ConnectionList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetConnection(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetConnection',
+            context__pb2.ConnectionId.SerializeToString,
+            context__pb2.Connection.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetConnection(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetConnection',
+            context__pb2.Connection.SerializeToString,
+            context__pb2.ConnectionId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def RemoveConnection(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/RemoveConnection',
+            context__pb2.ConnectionId.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetConnectionEvents(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_stream(request, target, '/context.ContextService/GetConnectionEvents',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.ConnectionEvent.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetOpticalConfig(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetOpticalConfig',
+            context__pb2.Empty.SerializeToString,
+            context__pb2.OpticalConfigList.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetOpticalConfig(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetOpticalConfig',
+            context__pb2.OpticalConfig.SerializeToString,
+            context__pb2.OpticalConfigId.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SelectOpticalConfig(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SelectOpticalConfig',
+            context__pb2.OpticalConfigId.SerializeToString,
+            context__pb2.OpticalConfig.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SetOpticalLink(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/SetOpticalLink',
+            context__pb2.OpticalLink.SerializeToString,
+            context__pb2.Empty.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetOpticalLink(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetOpticalLink',
+            context__pb2.OpticalLinkId.SerializeToString,
+            context__pb2.OpticalLink.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def GetFiber(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/context.ContextService/GetFiber',
+            context__pb2.FiberId.SerializeToString,
+            context__pb2.Fiber.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
diff --git a/src/device/tests/qkd/unit/retrieve_device_mock_information.py b/src/device/tests/qkd/unit/retrieve_device_mock_information.py
new file mode 100644
index 0000000000000000000000000000000000000000..20074924b72eb1557a6af72221e634a4cfe54346
--- /dev/null
+++ b/src/device/tests/qkd/unit/retrieve_device_mock_information.py
@@ -0,0 +1,70 @@
+import unittest
+from unittest.mock import patch, MagicMock
+from context.client.ContextClient import ContextClient
+from common.proto.context_pb2 import Empty
+
+def retrieve_descriptor_information():
+    client = ContextClient()
+    contexts = client.ListContexts(Empty())
+    topologies = client.ListTopologies(contexts.contexts[0].context_id)
+    devices = client.ListDevices(Empty())
+    links = client.ListLinks(Empty())
+
+    return {
+        'contexts': contexts,
+        'topologies': topologies,
+        'devices': devices,
+        'links': links,
+    }
+
+class TestRetrieveDescriptorInformation(unittest.TestCase):
+
+    @patch('context.client.ContextClient.ContextClient')
+    def test_retrieve_descriptor_information(self, MockContextClient):
+        # Setup mock responses
+        mock_client = MagicMock()
+        MockContextClient.return_value = mock_client
+
+        # Mocking ListContexts response
+        context_mock = MagicMock()
+        context_mock.contexts = [MagicMock()]
+        context_mock.contexts[0].context_id.context_uuid.uuid = "admin"
+        mock_client.ListContexts.return_value = context_mock
+
+        # Mocking ListTopologies response
+        topology_mock = MagicMock()
+        topology_mock.topologies = [MagicMock()]
+        topology_mock.topologies[0].topology_id.topology_uuid.uuid = "admin"
+        mock_client.ListTopologies.return_value = topology_mock
+
+        # Mocking ListDevices response
+        device_mock = MagicMock()
+        device_mock.devices = [MagicMock()]
+        device_mock.devices[0].device_id.device_uuid.uuid = "QKD1"
+        device_mock.devices[0].device_type = "qkd-node"
+        device_mock.devices[0].device_operational_status = 0
+        device_mock.devices[0].device_drivers = [12]
+        mock_client.ListDevices.return_value = device_mock
+
+        # Mocking ListLinks response
+        link_mock = MagicMock()
+        link_mock.links = [MagicMock()]
+        link_mock.links[0].link_id.link_uuid.uuid = "QKD1/10.211.36.220:1001==QKD2/10.211.36.220:2001"
+        mock_client.ListLinks.return_value = link_mock
+
+        # Call the function and verify
+        result = retrieve_descriptor_information()
+
+        mock_client.ListContexts.assert_called_once_with(Empty())
+        mock_client.ListTopologies.assert_called_once_with(context_mock.contexts[0].context_id)
+        mock_client.ListDevices.assert_called_once_with(Empty())
+        mock_client.ListLinks.assert_called_once_with(Empty())
+
+        # Assertions to verify the expected structure
+        self.assertEqual(result['contexts'].contexts[0].context_id.context_uuid.uuid, "admin")
+        self.assertEqual(result['topologies'].topologies[0].topology_id.topology_uuid.uuid, "admin")
+        self.assertEqual(result['devices'].devices[0].device_id.device_uuid.uuid, "QKD1")
+        self.assertEqual(result['links'].links[0].link_id.link_uuid.uuid, "QKD1/10.211.36.220:1001==QKD2/10.211.36.220:2001")
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/src/device/tests/qkd/unit/retrieve_qkd_information.py b/src/device/tests/qkd/unit/retrieve_qkd_information.py
new file mode 100644
index 0000000000000000000000000000000000000000..10db6a3701cb50e3ff9ebf6f024c3ac4c69d05cf
--- /dev/null
+++ b/src/device/tests/qkd/unit/retrieve_qkd_information.py
@@ -0,0 +1,348 @@
+import grpc
+from common.proto.context_pb2 import Empty, ContextId, TopologyId, Uuid
+from common.proto.context_pb2_grpc import ContextServiceStub
+import unittest
+from unittest.mock import patch, MagicMock
+
+def get_context_topology_info():
+    # Establish a gRPC channel
+    channel = grpc.insecure_channel('10.152.183.77:1010')  # Update with the correct IP and port
+    stub = ContextServiceStub(channel)
+
+    # Retrieve the context information
+    context_list = stub.ListContexts(Empty())
+    contexts_info = []
+    for context in context_list.contexts:
+        context_info = {
+            'context_id': context.context_id.context_uuid.uuid,
+            'context_name': context.name,
+            'topologies': []
+        }
+
+        # Retrieve topology information for each context
+        topology_list = stub.ListTopologies(context.context_id)
+        for topology in topology_list.topologies:
+            topology_info = {
+                'topology_id': topology.topology_id.topology_uuid.uuid,
+                'topology_name': topology.name,
+                'devices': []
+            }
+
+            # Retrieve detailed topology information
+            topology_details = stub.GetTopologyDetails(topology.topology_id)
+            for device in topology_details.devices:
+                device_info = {
+                    'device_id': device.device_id.device_uuid.uuid,
+                    'device_name': device.name,
+                    'device_type': device.device_type,
+                    'status': device.device_operational_status,
+                    'drivers': [driver for driver in device.device_drivers],
+                    'endpoints': [{
+                        'uuid': endpoint.endpoint_id.endpoint_uuid.uuid,
+                        'name': endpoint.name,
+                        'type': endpoint.endpoint_type,
+                        'location': endpoint.endpoint_location
+                    } for endpoint in device.device_endpoints],
+                    'configurations': [{
+                        'key': config.custom.resource_key,
+                        'value': config.custom.resource_value
+                    } for config in device.device_config.config_rules],
+                    'interfaces': [{
+                        'id': interface.qkdi_id,
+                        'enabled': interface.enabled,
+                        'name': interface.name,
+                        'att_point': interface.qkdi_att_point,
+                        'capabilities': interface.qkdi_capabilities
+                    } for interface in device.qkd_interfaces.qkd_interface],
+                    'applications': [{
+                        'app_id': app.app_id,
+                        'app_qos': app.app_qos,
+                        'app_statistics': app.app_statistics,
+                        'backing_qkdl_id': app.backing_qkdl_id,
+                        'client_app_id': app.client_app_id
+                    } for app in device.qkd_applications.qkd_app]
+                }
+                topology_info['devices'].append(device_info)
+            context_info['topologies'].append(topology_info)
+        contexts_info.append(context_info)
+    
+    return contexts_info
+
+def get_detailed_device_info():
+    context_info = get_context_topology_info()
+    detailed_info = []
+    for context in context_info:
+        if context['context_name'] == 'admin':
+            for topology in context['topologies']:
+                if topology['topology_name'] == 'admin':
+                    detailed_info.extend(topology['devices'])
+    print("Detailed Device Info:", detailed_info)  # Print the detailed device information
+    return detailed_info
+
+
+class TestRetrieveQKDInformation(unittest.TestCase):
+
+    @patch('retrieve_qkd_information.ContextServiceStub')
+    def test_get_detailed_device_info(self, MockContextServiceStub):
+        # Mocking the gRPC channel and stubs
+        mock_stub = MagicMock()
+        MockContextServiceStub.return_value = mock_stub
+
+        # Create a mock response for ListContexts
+        context_id = ContextId(context_uuid=Uuid(uuid="43813baf-195e-5da6-af20-b3d0922e71a7"))
+        context_response = MagicMock()
+        mock_context = MagicMock()
+        mock_context.context_id = context_id
+        mock_context.name = "admin"
+        context_response.contexts = [mock_context]
+
+        # Create a mock response for ListTopologies
+        topology_id = TopologyId(topology_uuid=Uuid(uuid="c76135e3-24a8-5e92-9bed-c3c9139359c8"))
+        topology_response = MagicMock()
+        mock_topology = MagicMock()
+        mock_topology.topology_id = topology_id
+        mock_topology.name = "admin"
+        topology_response.topologies = [mock_topology]
+
+        # Create a mock response for GetTopologyDetails
+        device1 = MagicMock()
+        device1.device_id.device_uuid.uuid = "40e6c9e2-fdc8-5802-8361-413286c03494"
+        device1.name = "QKD2"
+        device1.device_type = "qkd-node"
+        device1.device_operational_status = "ENABLED"
+        device1.device_drivers = ["QKD"]
+        
+        endpoint1_1 = MagicMock()
+        endpoint1_1.endpoint_id.endpoint_uuid.uuid = "97b3b8e2-0e3e-5271-bc1e-ab2600b17fbd"
+        endpoint1_1.name = "10.211.36.220:2001"
+        endpoint1_1.endpoint_type = "-"
+        endpoint1_1.endpoint_location = ""
+
+        endpoint1_2 = MagicMock()
+        endpoint1_2.endpoint_id.endpoint_uuid.uuid = "bcb1cc4b-9208-54d1-bb70-8039871dd820"
+        endpoint1_2.name = "10.211.36.220:2002"
+        endpoint1_2.endpoint_type = "-"
+        endpoint1_2.endpoint_location = ""
+
+        device1.device_endpoints = [endpoint1_1, endpoint1_2]
+
+        config_rule1_1 = MagicMock()
+        config_rule1_1.custom.resource_key = "_connect/address"
+        config_rule1_1.custom.resource_value = "10.211.36.220"
+
+        config_rule1_2 = MagicMock()
+        config_rule1_2.custom.resource_key = "_connect/port"
+        config_rule1_2.custom.resource_value = "22222"
+
+        config_rule1_3 = MagicMock()
+        config_rule1_3.custom.resource_key = "_connect/settings"
+        config_rule1_3.custom.resource_value = "scheme: http"
+
+        device1.device_config.config_rules = [config_rule1_1, config_rule1_2, config_rule1_3]
+
+        interface1_1 = MagicMock()
+        interface1_1.qkdi_id = "200"
+        interface1_1.enabled = True
+        interface1_1.name = "10.211.36.220:2001"
+        interface1_1.qkdi_att_point = {'device': '10.211.36.220', 'port': '2001'}
+        interface1_1.qkdi_capabilities = {}
+
+        interface1_2 = MagicMock()
+        interface1_2.qkdi_id = "201"
+        interface1_2.enabled = True
+        interface1_2.name = "10.211.36.220:2002"
+        interface1_2.qkdi_att_point = {'device': '10.211.36.220', 'port': '2002'}
+        interface1_2.qkdi_capabilities = {}
+
+        device1.qkd_interfaces.qkd_interface = [interface1_1, interface1_2]
+
+        app1_1 = MagicMock()
+        app1_1.app_id = "00000002-0001-0000-0000-000000000000"
+        app1_1.app_qos = {}
+        app1_1.app_statistics = {'statistics': []}
+        app1_1.backing_qkdl_id = []
+        app1_1.client_app_id = []
+
+        device1.qkd_applications.qkd_app = [app1_1]
+
+        # Repeat similar structure for device2 and device3
+        device2 = MagicMock()
+        device2.device_id.device_uuid.uuid = "456e461e-1de7-569a-999f-73903e818e4c"
+        device2.name = "QKD3"
+        device2.device_type = "qkd-node"
+        device2.device_operational_status = "ENABLED"
+        device2.device_drivers = ["QKD"]
+
+        endpoint2_1 = MagicMock()
+        endpoint2_1.endpoint_id.endpoint_uuid.uuid = "73b56f99-52f3-5af9-a7fd-cdd6e94fb289"
+        endpoint2_1.name = "10.211.36.220:3001"
+        endpoint2_1.endpoint_type = "-"
+        endpoint2_1.endpoint_location = ""
+
+        device2.device_endpoints = [endpoint2_1]
+
+        config_rule2_1 = MagicMock()
+        config_rule2_1.custom.resource_key = "_connect/address"
+        config_rule2_1.custom.resource_value = "10.211.36.220"
+
+        config_rule2_2 = MagicMock()
+        config_rule2_2.custom.resource_key = "_connect/port"
+        config_rule2_2.custom.resource_value = "33333"
+
+        config_rule2_3 = MagicMock()
+        config_rule2_3.custom.resource_key = "_connect/settings"
+        config_rule2_3.custom.resource_value = "scheme: http"
+
+        device2.device_config.config_rules = [config_rule2_1, config_rule2_2, config_rule2_3]
+
+        interface2_1 = MagicMock()
+        interface2_1.qkdi_id = "300"
+        interface2_1.enabled = True
+        interface2_1.name = "10.211.36.220:3001"
+        interface2_1.qkdi_att_point = {'device': '10.211.36.220', 'port': '3001'}
+        interface2_1.qkdi_capabilities = {}
+
+        device2.qkd_interfaces.qkd_interface = [interface2_1]
+
+        app2_1 = MagicMock()
+        app2_1.app_id = "00000003-0001-0000-0000-000000000000"
+        app2_1.app_qos = {}
+        app2_1.app_statistics = {'statistics': []}
+        app2_1.backing_qkdl_id = []
+        app2_1.client_app_id = []
+
+        device2.qkd_applications.qkd_app = [app2_1]
+
+        device3 = MagicMock()
+        device3.device_id.device_uuid.uuid = "74520336-c12f-545e-9e18-15319f987352"
+        device3.name = "QKD1"
+        device3.device_type = "qkd-node"
+        device3.device_operational_status = "ENABLED"
+        device3.device_drivers = ["QKD"]
+
+        endpoint3_1 = MagicMock()
+        endpoint3_1.endpoint_id.endpoint_uuid.uuid = "197a413f-5051-5241-81b7-ea4f89f0a0fc"
+        endpoint3_1.name = "10.211.36.220:1001"
+        endpoint3_1.endpoint_type = "-"
+        endpoint3_1.endpoint_location = ""
+
+        device3.device_endpoints = [endpoint3_1]
+
+        config_rule3_1 = MagicMock()
+        config_rule3_1.custom.resource_key = "_connect/address"
+        config_rule3_1.custom.resource_value = "10.211.36.220"
+
+        config_rule3_2 = MagicMock()
+        config_rule3_2.custom.resource_key = "_connect/port"
+        config_rule3_2.custom.resource_value = "11111"
+
+        config_rule3_3 = MagicMock()
+        config_rule3_3.custom.resource_key = "_connect/settings"
+        config_rule3_3.custom.resource_value = "scheme: http"
+
+        device3.device_config.config_rules = [config_rule3_1, config_rule3_2, config_rule3_3]
+
+        interface3_1 = MagicMock()
+        interface3_1.qkdi_id = "100"
+        interface3_1.enabled = True
+        interface3_1.name = "10.211.36.220:1001"
+        interface3_1.qkdi_att_point = {'device': '10.211.36.220', 'port': '1001'}
+        interface3_1.qkdi_capabilities = {}
+
+        device3.qkd_interfaces.qkd_interface = [interface3_1]
+
+        app3_1 = MagicMock()
+        app3_1.app_id = "00000001-0001-0000-0000-000000000000"
+        app3_1.app_qos = {}
+        app3_1.app_statistics = {'statistics': []}
+        app3_1.backing_qkdl_id = []
+        app3_1.client_app_id = []
+
+        device3.qkd_applications.qkd_app = [app3_1]
+
+        topology_details_response = MagicMock(devices=[device1, device2, device3])
+
+        # Set up the mock return values
+        mock_stub.ListContexts.return_value = context_response
+        mock_stub.ListTopologies.return_value = topology_response
+        mock_stub.GetTopologyDetails.return_value = topology_details_response
+
+        # Run the function to test
+        detailed_info = get_detailed_device_info()
+
+        # Print the detailed information for testing purposes
+        print("Test Detailed Info:", detailed_info)
+
+        # Assertions
+        mock_stub.ListContexts.assert_called_once_with(Empty())
+        mock_stub.ListTopologies.assert_called_once_with(context_id)
+        mock_stub.GetTopologyDetails.assert_called_once_with(topology_id)
+
+        # Check the returned information
+        expected_info = [{
+            'device_id': '40e6c9e2-fdc8-5802-8361-413286c03494',
+            'device_name': 'QKD2',
+            'device_type': 'qkd-node',
+            'status': 'ENABLED',
+            'drivers': ['QKD'],
+            'endpoints': [
+                {'uuid': '97b3b8e2-0e3e-5271-bc1e-ab2600b17fbd', 'name': '10.211.36.220:2001', 'type': '-', 'location': ''},
+                {'uuid': 'bcb1cc4b-9208-54d1-bb70-8039871dd820', 'name': '10.211.36.220:2002', 'type': '-', 'location': ''}
+            ],
+            'configurations': [
+                {'key': '_connect/address', 'value': '10.211.36.220'},
+                {'key': '_connect/port', 'value': '22222'},
+                {'key': '_connect/settings', 'value': 'scheme: http'}
+            ],
+            'interfaces': [
+                {'id': '200', 'enabled': True, 'name': '10.211.36.220:2001', 'att_point': {'device': '10.211.36.220', 'port': '2001'}, 'capabilities': {}},
+                {'id': '201', 'enabled': True, 'name': '10.211.36.220:2002', 'att_point': {'device': '10.211.36.220', 'port': '2002'}, 'capabilities': {}}
+            ],
+            'applications': [
+                {'app_id': '00000002-0001-0000-0000-000000000000', 'app_qos': {}, 'app_statistics': {'statistics': []}, 'backing_qkdl_id': [], 'client_app_id': []}
+            ]
+        }, {
+            'device_id': '456e461e-1de7-569a-999f-73903e818e4c',
+            'device_name': 'QKD3',
+            'device_type': 'qkd-node',
+            'status': 'ENABLED',
+            'drivers': ['QKD'],
+            'endpoints': [
+                {'uuid': '73b56f99-52f3-5af9-a7fd-cdd6e94fb289', 'name': '10.211.36.220:3001', 'type': '-', 'location': ''}
+            ],
+            'configurations': [
+                {'key': '_connect/address', 'value': '10.211.36.220'},
+                {'key': '_connect/port', 'value': '33333'},
+                {'key': '_connect/settings', 'value': 'scheme: http'}
+            ],
+            'interfaces': [
+                {'id': '300', 'enabled': True, 'name': '10.211.36.220:3001', 'att_point': {'device': '10.211.36.220', 'port': '3001'}, 'capabilities': {}}
+            ],
+            'applications': [
+                {'app_id': '00000003-0001-0000-0000-000000000000', 'app_qos': {}, 'app_statistics': {'statistics': []}, 'backing_qkdl_id': [], 'client_app_id': []}
+            ]
+        }, {
+            'device_id': '74520336-c12f-545e-9e18-15319f987352',
+            'device_name': 'QKD1',
+            'device_type': 'qkd-node',
+            'status': 'ENABLED',
+            'drivers': ['QKD'],
+            'endpoints': [
+                {'uuid': '197a413f-5051-5241-81b7-ea4f89f0a0fc', 'name': '10.211.36.220:1001', 'type': '-', 'location': ''}
+            ],
+            'configurations': [
+                {'key': '_connect/address', 'value': '10.211.36.220'},
+                {'key': '_connect/port', 'value': '11111'},
+                {'key': '_connect/settings', 'value': 'scheme: http'}
+            ],
+            'interfaces': [
+                {'id': '100', 'enabled': True, 'name': '10.211.36.220:1001', 'att_point': {'device': '10.211.36.220', 'port': '1001'}, 'capabilities': {}}
+            ],
+            'applications': [
+                {'app_id': '00000001-0001-0000-0000-000000000000', 'app_qos': {}, 'app_statistics': {'statistics': []}, 'backing_qkdl_id': [], 'client_app_id': []}
+            ]
+        }]
+        self.assertEqual(detailed_info, expected_info)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/src/device/tests/qkd/unit/test_mock_qkd_node.py b/src/device/tests/qkd/unit/test_mock_qkd_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..aee118e8f63552e9e709cc8958b002639cf1ade7
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_mock_qkd_node.py
@@ -0,0 +1,17 @@
+import pytest
+import requests
+from requests.exceptions import ConnectionError
+
+def test_mock_qkd_node_responses():
+    response = requests.get('http://10.211.36.220:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
+    assert response.status_code == 200
+    data = response.json()
+    assert 'qkd_node' in data
+
+def test_mock_node_failure_scenarios():
+    try:
+        response = requests.get('http://10.211.36.220:12345/restconf/data/etsi-qkd-sdn-node:qkd_node')
+    except ConnectionError as e:
+        assert isinstance(e, ConnectionError)
+    else:
+        pytest.fail("ConnectionError not raised as expected")
diff --git a/src/device/tests/qkd/unit/test_qkd_compliance.py b/src/device/tests/qkd/unit/test_qkd_compliance.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b624d923874121e99638c8cf8c666e2b3316c72
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_compliance.py
@@ -0,0 +1,10 @@
+
+import pytest
+import requests
+from tests.tools.mock_qkd_nodes.YangValidator import YangValidator
+
+def test_compliance_with_yang_models():
+    validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
+    response = requests.get('http://10.211.36.220:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
+    data = response.json()
+    assert validator.parse_to_dict(data) is not None
diff --git a/src/device/tests/qkd/unit/test_qkd_configuration.py b/src/device/tests/qkd/unit/test_qkd_configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..179e072fa53aae999939968fa2e2b6db8eec2301
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_configuration.py
@@ -0,0 +1,93 @@
+import pytest
+from src.device.service.drivers.qkd.QKDDriver2 import QKDDriver
+import json
+
+@pytest.fixture
+def qkd_driver():
+    return QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+
+# Deliverable Test ID: SBI_Test_03 (Initial Config Retrieval)
+def test_initial_config_retrieval(qkd_driver):
+    qkd_driver.Connect()
+    
+    # Retrieve and print initial config
+    config = qkd_driver.GetInitialConfig()
+    print("Initial Config:", json.dumps(config, indent=2))
+    
+    assert isinstance(config, list)
+    assert len(config) > 0
+    assert isinstance(config[0], tuple)
+    assert config[0][0] == 'qkd_node'
+    assert isinstance(config[0][1], dict)
+
+# Deliverable Test ID: INT_LQ_Test_04 (QKD Links Retrieval)
+def test_retrieve_links(qkd_driver):
+    qkd_driver.Connect()
+    
+    # Retrieve and print link information
+    links = qkd_driver.GetConfig(['links'])
+    
+    if not links:
+        pytest.fail("No links found in the system.")
+    
+    if isinstance(links[0][1], Exception):
+        print(f"Error retrieving links: {links[0][1]}")
+    else:
+        print("Links:", json.dumps(links, indent=2))
+    
+    assert isinstance(links, list)
+    assert len(links) > 0
+
+# Deliverable Test ID: INT_LQ_Test_03 (QKD Interfaces Retrieval)
+def test_retrieve_interfaces(qkd_driver):
+    qkd_driver.Connect()
+    
+    # Retrieve and print interface information
+    interfaces = qkd_driver.GetConfig(['interfaces'])
+    
+    if not interfaces:
+        pytest.fail("No interfaces found in the system.")
+    
+    if isinstance(interfaces[0][1], Exception):
+        print(f"Error retrieving interfaces: {interfaces[0][1]}")
+    else:
+        print("Interfaces:", json.dumps(interfaces, indent=2))
+    
+    assert isinstance(interfaces, list)
+    assert len(interfaces) > 0
+
+# Deliverable Test ID: INT_LQ_Test_02 (QKD Capabilities Retrieval)
+def test_retrieve_capabilities(qkd_driver):
+    qkd_driver.Connect()
+    
+    # Retrieve and print capabilities information
+    capabilities = qkd_driver.GetConfig(['capabilities'])
+    
+    if not capabilities:
+        pytest.fail("No capabilities found in the system.")
+    
+    if isinstance(capabilities[0][1], Exception):
+        print(f"Error retrieving capabilities: {capabilities[0][1]}")
+    else:
+        print("Capabilities:", json.dumps(capabilities, indent=2))
+    
+    assert isinstance(capabilities, list)
+    assert len(capabilities) > 0
+
+# Deliverable Test ID: INT_LQ_Test_03 (QKD Endpoints Retrieval)
+def test_retrieve_endpoints(qkd_driver):
+    qkd_driver.Connect()
+    
+    # Retrieve and print endpoint information
+    endpoints = qkd_driver.GetConfig(['endpoints'])
+    
+    if not endpoints:
+        pytest.fail("No endpoints found in the system.")
+    
+    if isinstance(endpoints[0][1], Exception):
+        print(f"Error retrieving endpoints: {endpoints[0][1]}")
+    else:
+        print("Endpoints:", json.dumps(endpoints, indent=2))
+    
+    assert isinstance(endpoints, list)
+    assert len(endpoints) > 0
diff --git a/src/device/tests/qkd/unit/test_qkd_error_hanling.py b/src/device/tests/qkd/unit/test_qkd_error_hanling.py
new file mode 100644
index 0000000000000000000000000000000000000000..a053e819d14a5093e2d0b5681728df61a5776083
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_error_hanling.py
@@ -0,0 +1,22 @@
+import json
+import pytest
+from src.device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+def test_error_handling_invalid_operations():
+    driver = QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+    driver.Connect()
+    result = driver.SetConfig([('/invalid/resource', json.dumps({'invalid': 'data'}))])
+
+    # Print the result for debugging purposes
+    print("Result of SetConfig with invalid data:", result)
+
+    # Check if the result contains ValueError for invalid resource keys
+    assert all(isinstance(res, ValueError) for res in result), "Expected ValueError for invalid operations"
+
+def test_network_failure():
+    driver = QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+    driver.Connect()
+    # Simulate network failure by disconnecting the mock server
+    # This would require mock server modification to simulate downtime
+    result = driver.GetConfig(['/qkd_interfaces/qkd_interface'])
+    assert result == []  # Expecting an empty list instead of None
diff --git a/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..49afc8efdde6e9e2446c590e5f757cdbba54586c
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
@@ -0,0 +1,24 @@
+import pytest
+from unittest.mock import patch
+from src.device.service.drivers.qkd.QKDDriver2 import QKDDriver
+import requests
+
+@pytest.fixture
+def qkd_driver():
+    return QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+
+# Deliverable Test ID: SBI_Test_01
+def test_qkd_driver_connection(qkd_driver):
+    assert qkd_driver.Connect() is True
+
+# Deliverable Test ID: SBI_Test_01
+def test_qkd_driver_invalid_connection():
+    qkd_driver = QKDDriver(address='10.211.36.220', port=12345, username='user', password='pass')  # Use invalid port directly
+    assert qkd_driver.Connect() is False
+
+# Deliverable Test ID: SBI_Test_10
+@patch('src.device.service.drivers.qkd.QKDDriver2.requests.get')
+def test_qkd_driver_timeout_connection(mock_get, qkd_driver):
+    mock_get.side_effect = requests.exceptions.Timeout
+    qkd_driver.timeout = 0.001  # Simulate very short timeout
+    assert qkd_driver.Connect() is False
diff --git a/src/device/tests/qkd/unit/test_qkd_performance.py b/src/device/tests/qkd/unit/test_qkd_performance.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9ef9cf68bf4ed4fe11e791cfadaa0a80372aef5
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_performance.py
@@ -0,0 +1,16 @@
+# tests/unit/test_qkd_performance.py
+
+import pytest
+import time
+from src.device.service.drivers.qkd.QKDDriver import QKDDriver
+
+def test_performance_under_load():
+    driver = QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+    driver.Connect()
+    
+    start_time = time.time()
+    for _ in range(1000):
+        driver.GetConfig(['/qkd_interfaces/qkd_interface'])
+    end_time = time.time()
+    
+    assert (end_time - start_time) < 60
diff --git a/src/device/tests/qkd/unit/test_qkd_security.py b/src/device/tests/qkd/unit/test_qkd_security.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9602c7833b58e3d702430b92090842fc440bf33
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_security.py
@@ -0,0 +1,45 @@
+# test_qkd_security.py
+
+import os
+import pytest
+import requests
+import jwt
+from src.device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+SECRET_KEY = "your_secret_key"
+
+def generate_jwt_token(username: str) -> str:
+    return jwt.encode({'username': username}, SECRET_KEY, algorithm='HS256')
+
+@pytest.fixture()
+def enable_bypass_auth(request):
+    # Backup the original value of BYPASS_AUTH
+    original_bypass_auth = os.getenv('BYPASS_AUTH')
+    # Set BYPASS_AUTH to true for the test
+    os.environ['BYPASS_AUTH'] = 'true'
+    
+    def restore_bypass_auth():
+        # Restore the original value of BYPASS_AUTH
+        if original_bypass_auth is not None:
+            os.environ['BYPASS_AUTH'] = original_bypass_auth
+        else:
+            del os.environ['BYPASS_AUTH']
+
+    # Add the finalizer to restore the environment variable after the test
+    request.addfinalizer(restore_bypass_auth)
+
+@pytest.mark.usefixtures("enable_bypass_auth")
+def test_authentication():
+    token = generate_jwt_token('wrong_user')
+    driver = QKDDriver(address='10.211.36.220', port=11111, token=token)
+    assert driver.Connect() is False
+
+@pytest.mark.usefixtures("enable_bypass_auth")
+def test_authorization():
+    token = generate_jwt_token('user')
+    driver = QKDDriver(address='10.211.36.220', port=11111, token=token)
+    assert driver.Connect() is True
+    wrong_token = generate_jwt_token('wrong_user')
+    headers = {'Authorization': 'Bearer ' + wrong_token}
+    response = requests.get('http://10.211.36.220:11111/restconf/data/etsi-qkd-sdn-node:qkd_node', headers=headers)
+    assert response.status_code == 401
diff --git a/src/device/tests/qkd/unit/test_qkd_subscription.py b/src/device/tests/qkd/unit/test_qkd_subscription.py
new file mode 100644
index 0000000000000000000000000000000000000000..99a96f08cd06cefa06de388d5ded7e46c5fe7b40
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_qkd_subscription.py
@@ -0,0 +1,23 @@
+# tests/unit/test_qkd_subscription.py
+
+import pytest
+from src.device.service.drivers.qkd.QKDDriver2 import QKDDriver
+
+def test_state_subscription():
+    driver = QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+    driver.Connect()
+    result = driver.SubscribeState([('/qkd_interfaces/qkd_interface', 1.0, 2.0)])
+    assert all(isinstance(res, bool) and res for res in result)
+
+def test_state_unsubscription():
+    driver = QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+    driver.Connect()
+    result = driver.UnsubscribeState(['/qkd_interfaces/qkd_interface'])
+    assert all(isinstance(res, bool) and res for res in result)
+
+def test_state_retrieval():
+    driver = QKDDriver(address='10.211.36.220', port=11111, username='user', password='pass')
+    driver.Connect()
+    state = driver.GetState()
+    assert isinstance(state, dict) or isinstance(state, list)
+
diff --git a/src/device/tests/qkd/unit/validate_context.py b/src/device/tests/qkd/unit/validate_context.py
new file mode 100644
index 0000000000000000000000000000000000000000..9205f8f4f150ab27d57855af6e06f4ad13c695ff
--- /dev/null
+++ b/src/device/tests/qkd/unit/validate_context.py
@@ -0,0 +1,31 @@
+import grpc
+import pytest
+from common.proto.context_pb2 import Empty, ContextId, Uuid
+from common.proto.context_pb2_grpc import ContextServiceStub
+
+@pytest.fixture
+def grpc_stub():
+    channel = grpc.insecure_channel('10.152.183.77:1010')  # Replace with actual server address
+    stub = ContextServiceStub(channel)
+    return stub
+
+def test_retrieve_all_contexts(grpc_stub):
+    try:
+        response = grpc_stub.ListContexts(Empty())
+        assert response is not None
+        assert len(response.contexts) > 0
+        for context in response.contexts:
+            assert isinstance(context.context_uuid.uuid, str)
+            assert isinstance(context.name, str)
+    except grpc.RpcError as e:
+        print(f"gRPC Error: {e}")
+
+def test_retrieve_context_details(grpc_stub):
+    try:
+        uuid = Uuid(uuid="valid-id-here")
+        context_id = ContextId(context_uuid=uuid)
+        response = grpc_stub.GetContext(context_id)
+        assert response is not None
+        assert response.context_uuid.uuid == "valid-id-here"
+    except grpc.RpcError as e:
+        print(f"gRPC Error: {e}")