diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index 950b984420a4cb4d50a455312ecb306752a406fe..ef5195eaeb0d16d2ee5c3e5b7ba2417db461b479 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -39,7 +39,7 @@ spec:
             - containerPort: 9192
           env:
             - name: LOG_LEVEL
-              value: "INFO"
+              value: "DEBUG"
           startupProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:2020"]
diff --git a/my_deploy.sh b/my_deploy.sh
index a048edb30b66791d5405961b41faf2443f9d51e1..59c7c0a9ad098ff14de4c26bff30e034c1796eb1 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -20,7 +20,7 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice nbi webui"
+export TFS_COMPONENTS="context device pathcomp service webui"
 
 # Uncomment to activate Monitoring (old)
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
@@ -134,7 +134,7 @@ export CRDB_PASSWORD="tfs123"
 export CRDB_DEPLOY_MODE="single"
 
 # Disable flag for dropping database, if it exists.
-export CRDB_DROP_DATABASE_IF_EXISTS=""
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
 
 # Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""
diff --git a/proto/context.proto b/proto/context.proto
index 9f06d32ee04b5102ce2af511f45f8de34f984599..80281f833fe2e40f4f80b2059daaed082dd492e9 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -223,6 +223,7 @@ enum DeviceDriverEnum {
   DEVICEDRIVER_IETF_ACTN = 10;
   DEVICEDRIVER_OC = 11;
   DEVICEDRIVER_QKD = 12;
+  DEVICEDRIVER_RYU = 13;
 }
 
 enum DeviceOperationalStatusEnum {
diff --git a/scripts/run_openflow.sh b/scripts/run_openflow.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2c525ca70242374ebe7c09993833cee867455167
--- /dev/null
+++ b/scripts/run_openflow.sh
@@ -0,0 +1,8 @@
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze coverage of code at same time
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=DEBUG --verbose \
+    device/tests/test_OpenFlow.py
\ No newline at end of file
diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py
index eb315352b47bbe501f66868c0181a0d34cd6cfed..ccc83c9a6cf446ac60bd64cf5fd0bd632e21ad7b 100644
--- a/src/common/DeviceTypes.py
+++ b/src/common/DeviceTypes.py
@@ -1,4 +1,4 @@
-# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -48,7 +48,7 @@ class DeviceTypeEnum(Enum):
     PACKET_SWITCH                   = 'packet-switch'
     XR_CONSTELLATION                = 'xr-constellation'
     QKD_NODE                        = 'qkd-node'
-    OPEN_ROADM                      = 'openroadm'
+    OPENFLOW_RYU_CONTROLLER         = 'openflow-ryu-controller'
 
     # ETSI TeraFlowSDN controller
     TERAFLOWSDN_CONTROLLER          = 'teraflowsdn'
diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py
index 5342f788a7b273aa7f6ae3c5779774165cd852bc..691a7c05d7725b6d00bf40b5dd36b5e09fdbbf5d 100644
--- a/src/context/service/database/models/enums/DeviceDriver.py
+++ b/src/context/service/database/models/enums/DeviceDriver.py
@@ -35,6 +35,7 @@ class ORM_DeviceDriverEnum(enum.Enum):
     IETF_ACTN             = DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN
     OC                    = DeviceDriverEnum.DEVICEDRIVER_OC
     QKD                   = DeviceDriverEnum.DEVICEDRIVER_QKD
+    RYU                   = DeviceDriverEnum.DEVICEDRIVER_RYU
 
 grpc_to_enum__device_driver = functools.partial(
     grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum)
diff --git a/src/device/service/drivers/OpenFlow/OpenFlowDriver.py b/src/device/service/drivers/OpenFlow/OpenFlowDriver.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e70d11fb50351e23f91abb8d92ba149b3f24dc2
--- /dev/null
+++ b/src/device/service/drivers/OpenFlow/OpenFlowDriver.py
@@ -0,0 +1,196 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import logging, requests, threading
+from requests.auth import HTTPBasicAuth
+from typing import Any, Iterator, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.type_checkers.Checkers import chk_string, chk_type
+from device.service.driver_api._Driver import _Driver,RESOURCE_ENDPOINTS
+from device.service.drivers.OpenFlow.TfsApiClient import TfsApiClient
+from device.service.drivers.OpenFlow.Tools import find_key, get_switches, get_flows , add_flow , delete_flow , get_desc,get_port_desc, get_links_information,get_switches_information,del_flow_entry
+LOGGER = logging.getLogger(__name__)
+
+DRIVER_NAME = 'ryu'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
+
+ALL_RESOURCE_KEYS = [
+    RESOURCE_ENDPOINTS,
+]
+
+class OpenFlowDriver(_Driver):
+    def __init__(self, address: str, port: int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
+        self.__lock = threading.Lock()
+        self.__started = threading.Event()
+        self.__terminate = threading.Event()
+        username = self.settings.get('username')
+        password = self.settings.get('password')
+        self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
+        scheme = self.settings.get('scheme', 'http')
+        self.__base_url = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port))
+        self.__timeout = int(self.settings.get('timeout', 120))
+        config = {'mapping_not_needed': False, 'service_endpoint_mapping': []}
+        self.tac = TfsApiClient(self.address, int(self.port), scheme=scheme, username=username, password=password)
+
+    def Connect(self) -> bool:
+        url = f"{self.__base_url}"
+        with self.__lock:
+            try:
+                response = requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth)
+                response.raise_for_status()
+            except requests.exceptions.Timeout:
+                LOGGER.exception(f"Timeout connecting to {self.__base_url}")
+                return False
+            except requests.exceptions.RequestException as e:
+                LOGGER.exception(f"Exception connecting to {self.__base_url}: {e}")
+                return False
+            else:
+                self.__started.set()
+                return True
+
+    def Disconnect(self) -> bool:
+        with self.__lock:
+            self.__terminate.set()
+            return True
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetInitialConfig(self) -> List[Tuple[str, Any]]:
+        with self.__lock:
+            return []
+        
+    @metered_subclass_method(METRICS_POOL)
+    def GetConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+        chk_type('resources', resource_keys, list)
+        results = []
+        with self.__lock:
+            if len(resource_keys) == 0:resource_keys = ALL_RESOURCE_KEYS
+            LOGGER.info(f'resource_key:{ALL_RESOURCE_KEYS}')
+            for i, resource_key in enumerate(resource_keys):
+                str_resource_name = 'resource_key[#{:d}]'.format(i)
+                try:
+                    chk_string(str_resource_name, resource_key, allow_empty=False)
+                    if resource_key == RESOURCE_ENDPOINTS:
+                        LOGGER.info(f'resource_key:{RESOURCE_ENDPOINTS}')
+                        results.extend(self.tac.get_devices_endpoints())  
+                except Exception as e:
+                    LOGGER.exception('Unhandled error processing resource_key({:s})'.format(str(resource_key)))
+                    results.append((resource_key, e))
+        return results
+
+
+#    @metered_subclass_method(METRICS_POOL)
+#    def GetConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+#        chk_type('resources', resource_keys, list)
+#        results = []
+#        with self.__lock:
+#            for key in resource_keys:
+#                try:
+#                    if key.startswith('flows:'):
+#                        dpid = key.split(':', 1)[1]
+#                        flows = get_flows(self.__base_url, dpid, auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, flows))
+#                    elif key.startswith('description:'):
+#                        dpid = key.split(':', 1)[1]
+#                        desc = get_desc(self.__base_url, dpid, auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, desc))
+#                    elif key.startswith('switches'):
+#                        switches = get_switches(self.__base_url, auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, switches))
+#                    elif key.startswith('port_description:'):
+#                        dpid = key.split(':', 1)[1]
+#                        desc = get_port_desc(self.__base_url,dpid, auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, desc))
+#                    elif key.startswith('switch_info'):
+#                        sin = get_switches_information(self.__base_url, auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, sin))
+#                    elif key.startswith('links_info'):
+#                        lin = get_links_information(self.__base_url, auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, lin))
+#                    else:
+#                        results.append((key, None))  # If key not handled, append None
+#                except Exception as e:
+#                    results.append((key, e))
+#        return results
+#    
+#    @metered_subclass_method(METRICS_POOL)
+#    def DeleteConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+#        chk_type('resources', resource_keys, list)
+#        results = []
+#        with self.__lock:
+#            for item in resource_keys:
+#                try:
+#                    if isinstance(item, tuple):
+#                        key, data = item
+#                    else:
+#                        key, data = item, None
+#                    if key.startswith('flowentry_delete:'):
+#                        dpid = key.split(':', 1)[1]
+#                        flows = del_flow_entry(self.__base_url, dpid, auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, flows))
+#                    elif key=='flow_data' and data:
+#                        flow_del = delete_flow (self.__base_url,data,auth=self.__auth, timeout=self.__timeout)
+#                        results.append((key, flow_del))
+#                    else:
+#                        results.append((key, None))  
+#                except Exception as e:
+#                    results.append((key, e))
+#        return results
+#
+#    @metered_subclass_method(METRICS_POOL)
+#    def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+#        results = []
+#        if not resources:
+#            return results       
+#        with self.__lock:
+#            for item in resources:
+#                LOGGER.info('resources contains: %s', item)
+#                try:
+#                    if isinstance(item, tuple) and len(item) == 2:
+#                        key, flow_data = item
+#                    else:
+#                        LOGGER.warning("Resource format invalid. Each item should be a tuple with (key, data).")
+#                        results.append(False)
+#                        continue
+#                    if key == "flow_data" and isinstance(flow_data, dict):
+#                        LOGGER.info(f"Found valid flow_data entry: {flow_data}")
+#                        success = add_flow(self.__base_url, flow_data, auth=self.__auth, timeout=self.__timeout)
+#                        results.append(success)
+#                    else:
+#                        LOGGER.warning(f"Skipping item with key: {key} due to invalid format or missing data.")
+#                        results.append(False)
+#
+#                except Exception as e:
+#                    LOGGER.error(f"Exception while setting configuration for item {item}: {str(e)}")
+#                    results.append(e)
+#
+#        return results
+#
+#
+#
+#    @metered_subclass_method(METRICS_POOL)
+#    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+#        # TODO: TAPI does not support monitoring by now
+#        return [False for _ in subscriptions]
+#
+#    @metered_subclass_method(METRICS_POOL)
+#    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+#        # TODO: TAPI does not support monitoring by now
+#        return [False for _ in subscriptions]
+#
+#    def GetState(
+#        self, blocking=False, terminate : Optional[threading.Event] = None
+#    ) -> Iterator[Tuple[float, str, Any]]:
+#        # TODO: TAPI does not support monitoring by now
+#        return []
diff --git a/src/device/service/drivers/OpenFlow/TfsApiClient.py b/src/device/service/drivers/OpenFlow/TfsApiClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..5db9c202c6b3d1a10d9f61e9ec7fa43d2547b5b0
--- /dev/null
+++ b/src/device/service/drivers/OpenFlow/TfsApiClient.py
@@ -0,0 +1,144 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, requests
+from os import name
+from requests.auth import HTTPBasicAuth
+from typing import Dict, List, Optional
+
+
+GET_DEVICES_URL = '{:s}://{:s}:{:d}/v1.0/topology/switches'
+GET_LINKS_URL   = '{:s}://{:s}:{:d}/v1.0/topology/links'
+TIMEOUT = 30
+
+HTTP_OK_CODES = {
+    200,    # OK
+    201,    # Created
+    202,    # Accepted
+    204,    # No Content
+}
+
+MAPPING_STATUS = {
+    'DEVICEOPERATIONALSTATUS_UNDEFINED': 0,
+    'DEVICEOPERATIONALSTATUS_DISABLED' : 1,
+    'DEVICEOPERATIONALSTATUS_ENABLED'  : 2,
+}
+
+MAPPING_DRIVER = {
+    'DEVICEDRIVER_UNDEFINED'            : 0,
+    'DEVICEDRIVER_OPENCONFIG'           : 1,
+    'DEVICEDRIVER_TRANSPORT_API'        : 2,
+    'DEVICEDRIVER_P4'                   : 3,
+    'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4,
+    'DEVICEDRIVER_ONF_TR_532'           : 5,
+    'DEVICEDRIVER_XR'                   : 6,
+    'DEVICEDRIVER_IETF_L2VPN'           : 7,
+    'DEVICEDRIVER_GNMI_OPENCONFIG'      : 8,
+    'DEVICEDRIVER_OPTICAL_TFS'          : 9,
+    'DEVICEDRIVER_IETF_ACTN'            : 10,
+    'DEVICEDRIVER_OC'                   : 11,
+    'DEVICEDRIVER_QKD'                  : 12,
+    'DEVICEDRIVER_RYU'                  : 13,
+}
+
+MSG_ERROR = 'Could not retrieve devices/links from remote Ryu controller({:s}). status_code={:s} reply={:s}'
+
+LOGGER = logging.getLogger(__name__)
+
+class TfsApiClient:
+    def __init__(
+        self, address : str, port : int, scheme : str = 'http',
+        username : Optional[str] = None, password : Optional[str] = None
+    ) -> None:
+        self._devices_url = GET_DEVICES_URL.format(scheme, address, port)
+        LOGGER.info(f'self_devices_url{self._devices_url}')
+        self._links_url = GET_LINKS_URL.format(scheme, address, port)
+        self._auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
+
+    def get_devices_endpoints(self) -> List[Dict]:
+        LOGGER.debug('[get_devices_endpoints] begin')
+
+        reply_switches = requests.get(self._devices_url, timeout=TIMEOUT, verify=False, auth=self._auth)
+        if reply_switches.status_code not in HTTP_OK_CODES:
+            msg = MSG_ERROR.format(str(self._devices_url), str(reply_switches.status_code), str(reply_switches))
+            LOGGER.error(msg)
+            raise Exception(msg)
+
+        json_reply_switches = reply_switches.json()
+        LOGGER.info('[get_devices_endpoints] json_reply_switches={:s}'.format(json.dumps(json_reply_switches)))
+
+        result = list()
+        for json_switch in json_reply_switches:
+            device_uuid: str = json_switch['dpid']
+            device_ports = json_switch.get('ports', []) 
+
+            for port in device_ports: 
+                port_name = port.get('name', '') 
+                device_name = port_name.split('-')[0]
+                port_no = port.get('port_no', '')
+                hw_address = port.get('hw_addr', '')
+
+                device_url = '/devices/device[{:s}]'.format(device_uuid)
+                device_data = {
+                    'uuid': device_uuid,
+                    'name': device_name,
+                    'type': 'packet-switch', 
+                    'status': 2,  # DEVICEOPERATIONALSTATUS_ENABLED (see MAPPING_STATUS)
+                    'drivers': 'DEVICEDRIVER_RYU',
+                    }
+                result.append((device_url, device_data))
+        for json_switch in json_reply_switches:
+            device_uuid: str = json_switch['dpid']
+            device_ports = json_switch.get('ports', []) 
+            for port in device_ports: 
+                port_name = port.get('name', '')
+                port_no   = port.get('port_no','')  
+
+                endpoint_uuid = port_name
+                endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid)
+                endpoint_data = {
+                    'device_uuid': device_uuid,
+                    'uuid': port_no,
+                    'name': port_name,
+                    'type': 'copper',
+                }
+                result.append((endpoint_url, endpoint_data))
+#
+        reply = requests.get(self._links_url, timeout=TIMEOUT, verify=False, auth=self._auth)
+        if reply.status_code not in HTTP_OK_CODES:
+            msg = MSG_ERROR.format(str(self._links_url), str(reply.status_code), str(reply))
+            LOGGER.error(msg)
+            raise Exception(msg)
+        for json_link in reply.json():
+            dpid_src = json_link.get('src', {}).get('dpid', '')
+            dpid_dst = json_link.get('dst', {}).get('dpid', '')
+            port_src_name = json_link.get('src', {}).get('name', '')
+            port_dst_name = json_link.get('dst', {}).get('name', '')
+            link_name = f"{port_src_name}=={port_dst_name}"
+            link_uuid = f"{dpid_src}-{port_src_name}==={dpid_dst}-{port_dst_name}"
+            link_endpoint_ids = [
+                (dpid_src, port_src_name),  
+                (dpid_dst, port_dst_name),
+            ]
+            LOGGER.info('link_endpoint_ids [{:s}]'.format(str(link_endpoint_ids)))
+            link_url = '/links/link[{:s}]'.format(link_uuid)
+            link_data = {
+                'uuid': link_uuid,
+                'name': link_name,
+                'endpoints': link_endpoint_ids,
+            }
+            result.append((link_url, link_data))
+#
+        LOGGER.debug('[get_devices_endpoints] topology; returning')
+        return result
diff --git a/src/device/service/drivers/OpenFlow/Tools.py b/src/device/service/drivers/OpenFlow/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..d683470876ea8c00fc26be9d6378dd46549b1c75
--- /dev/null
+++ b/src/device/service/drivers/OpenFlow/Tools.py
@@ -0,0 +1,174 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, operator, requests
+from requests.auth import HTTPBasicAuth
+from typing import Optional
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_SERVICES
+from typing import List, Dict, Optional, Tuple, Union
+
+LOGGER = logging.getLogger(__name__)
+
+RESOURCE_ENDPOINTS = {
+    #get configurations
+    "switches": "/stats/switches",
+    "description": "/stats/desc",
+    "flows": "/stats/flow",
+    "port_description":"/stats/portdesc",
+    "switch_info":"/v1.0/topology/switches",
+    "links_info":"/v1.0/topology/links",
+    #add flow
+    "flow_add": "/stats/flowentry/add",
+    #Delete all matching flow entries of the switch.
+    "flow_delete": "/stats/flowentry/delete",
+    "flowentry_delete":"/stats/flowentry/clear", #according to dpid
+
+}
+
+HTTP_OK_CODES = {
+    200,    # OK
+    201,    # Created
+    202,    # Accepted
+    204,    # No Content
+}
+
+# Utility function to find and extract a specific key from a resource.
+def find_key(resource: Tuple[str, str], key: str) -> Union[dict, str, None]:
+    try:
+        return json.loads(resource[1])[key]
+    except KeyError:
+        LOGGER.warning(f"Key '{key}' not found in resource.")
+        return None
+
+def get_switches(root_url: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['switches']}"
+    result = []
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        switches = response.json()
+        LOGGER.info(f"Successfully retrieved switches: {switches}")
+        result = switches
+    except requests.exceptions.Timeout:
+        LOGGER.exception(f"Timeout connecting to {url}")
+    except requests.exceptions.RequestException as e:
+        LOGGER.exception(f"Error retrieving switches: {str(e)}")
+    return result
+
+def get_switches_information(root_url: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['switch_info']}"
+    result = []
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        switches_info = response.json()
+        LOGGER.info(f"Successfully retrieved switches: {switches_info}")
+        result = switches_info
+    except requests.exceptions.Timeout:
+        LOGGER.exception(f"Timeout connecting to {url}")
+    except requests.exceptions.RequestException as e:
+        LOGGER.exception(f"Error retrieving switches: {str(e)}")
+    return result
+
+def get_links_information(root_url: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['links_info']}"
+    result = []
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        links_info = response.json()
+        LOGGER.info(f"Successfully retrieved links: {links_info}")
+        result = links_info
+    except requests.exceptions.Timeout:
+        LOGGER.exception(f"Timeout connecting to {url}")
+    except requests.exceptions.RequestException as e:
+        LOGGER.exception(f"Error retrieving links: {str(e)}")
+    return result
+
+def get_flows(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flows']}/{dpid}"
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        flows = response.json()
+        LOGGER.info(f"Successfully retrieved flow rules for DPID {dpid}")
+        return flows
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to retrieve flow rules for DPID {dpid}: {str(e)}")
+        return []
+    
+#get description 
+def get_desc(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> Dict:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['description']}/{dpid}"
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        desc = response.json()
+        LOGGER.info(f"Successfully retrieved description for DPID {dpid}: {desc}")
+        return desc
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to retrieve description for DPID {dpid}: {str(e)}")
+        return {}
+    
+def get_port_desc(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> Dict:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['port_description']}/{dpid}"
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        port_desc = response.json()
+        LOGGER.info(f"Successfully retrieved description for DPID {dpid}: {port_desc}")
+        return port_desc
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to retrieve description for DPID {dpid}: {str(e)}")
+        return {}
+
+##according to dpid
+def del_flow_entry(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> Dict:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flowentry_delete']}/{dpid}"
+    try:
+        response = requests.delete(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        flow_desc = response.json()
+        LOGGER.info(f"Successfully retrieved description for DPID {dpid}: {flow_desc}")
+        return flow_desc
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to retrieve description for DPID {dpid}: {str(e)}")
+        return {}
+
+#  to delete a flow based on match criteria.
+def delete_flow(root_url: str, flow_data: dict, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> bool:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flow_delete']}"
+    try:
+        response = requests.post(url, json=flow_data, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        LOGGER.info(f"Flow configuration deleted successfully for DPID {flow_data.get('dpid')}.")
+        return True
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to delete flow configuration for DPID {flow_data.get('dpid')}: {str(e)}")
+        return False
+    
+def add_flow(root_url: str, flow_data: dict, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> bool:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flow_add']}"
+    LOGGER.info(f"Posting flow data: {flow_data} (type: {type(flow_data)}) to URL: {url}") 
+    try:
+        response = requests.post(url, json=flow_data, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        LOGGER.info("Flow configuration added successfully.")
+        return True
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to add flow configuration: {str(e)}")
+        return False
+
+
+
diff --git a/src/device/service/drivers/OpenFlow/__init__.py b/src/device/service/drivers/OpenFlow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f3d1a042c69720452803e994c38f7c2e966c684
--- /dev/null
+++ b/src/device/service/drivers/OpenFlow/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_SERVICES
+
+ALL_RESOURCE_KEYS = [
+    RESOURCE_ENDPOINTS,
+    RESOURCE_SERVICES,
+]
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index b99ee50ca8319ab96f9062a3c58c356fa2ae7ec7..837d83d53c75b02f8122990d59b6804a5ba72903 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -143,6 +143,16 @@ if LOAD_ALL_DEVICE_DRIVERS:
                 FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
             }
         ]))
+if LOAD_ALL_DEVICE_DRIVERS:
+    from.OpenFlow.OpenFlowDriver import OpenFlowDriver
+    DRIVERS.append(
+        (OpenFlowDriver, [
+            {
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPENFLOW_RYU_CONTROLLER ,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_RYU ,
+            }
+        ])
+    )
 
 if LOAD_ALL_DEVICE_DRIVERS:
     from .xr.XrDriver import XrDriver # pylint: disable=wrong-import-position
diff --git a/src/device/tests/test_OpenFlow.py b/src/device/tests/test_OpenFlow.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1d3919eafea68633a3ad9ab145a40cebd25df06
--- /dev/null
+++ b/src/device/tests/test_OpenFlow.py
@@ -0,0 +1,85 @@
+import logging
+import os
+import sys
+
+# NOTE(review): removed unused/bogus imports (json, re.A, resource, time,
+# joblib.Logger); joblib is not a project dependency and none were used.
+# Set before importing the driver so it does not require real devices.
+os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
+from device.service.drivers.OpenFlow.OpenFlowDriver import OpenFlowDriver
+logging.basicConfig(level=logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+def test_main():
+    driver_settings = {
+        'protocol': 'http', 
+        'username': None,  
+        'password': None,  
+        'use_tls': False, 
+    }
+    driver = OpenFlowDriver('127.0.0.1', 8080 , **driver_settings)
+    driver.Connect()
+
+    import requests
+
+    response = requests.get("http://127.0.0.1:8080/v1.0/topology/switches", timeout=10)
+    LOGGER.info(f'The response is: {response}')
+
+  
+    
+    # Test: GetConfig
+    #resource_keys = [ 'flows:1','description:1','switches','port_description:1','switch_info','links_info']
+  #  config = driver.GetConfig(resource_keys )
+  #  LOGGER.info('Specific configuration: %s', config)
+    
+    #resource_delete=["flowentry_delete:1"]
+    #config = driver.DeleteConfig(resource_delete)
+    #LOGGER.info('Specific configuration: %s', config)
+    #a=driver.GetConfig(["flows:1"])
+    #LOGGER.info('flow 1 = {:s}'.format(str(a)))
+#    delete_data = {
+#    "dpid": 2,
+#    "cookie": 1,
+#    "cookie_mask": 1,
+#    "table_id": 0,
+#    "idle_timeout": 30,
+#    "hard_timeout": 30,
+#    "priority": 11111,
+#    "flags": 1,
+#    "match":{
+#        "in_port":2
+#    },
+#    "actions":[
+#        {
+#            "type":"ddf",
+#            "port": 1
+#        }
+#    ]
+# }
+#    delete_result = driver.DeleteConfig([("flow_data", delete_data)])
+#    LOGGER.info('resources_to_delete = {:s}'.format(str(delete_result)))
+#    a=driver.GetConfig(["flows:1"])
+#    LOGGER.info('flow 2 = {:s}'.format(str(a)))
+#    flow_data = { 
+#        "dpid": 2,
+#        "priority": 22224,
+#        "match": {
+#            "in_port": 1
+#        },
+#        "actions": [
+#            {
+#                "type": "GOTO_TABLE",
+#                "table_id": 1
+#            }
+#        ]
+#    }
+#    set_result = driver.SetConfig([('flow_data',flow_data)])
+#    LOGGER.info(set_result)
+#    driver.Disconnect()
+#    
+    driver.Disconnect()
+
+if __name__ == '__main__':
+    sys.exit(test_main())
diff --git a/src/webui/service/static/topology_icons/openflow-ryu-controller.png b/src/webui/service/static/topology_icons/openflow-ryu-controller.png
new file mode 100644
index 0000000000000000000000000000000000000000..2982c57308983b367f1fa13c559fb702edcbadfe
Binary files /dev/null and b/src/webui/service/static/topology_icons/openflow-ryu-controller.png differ
diff --git a/tmp-code/DeviceTypes.py b/tmp-code/DeviceTypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f88ec8bb4814b6e5b85f9e28000d99a47a3ad329
--- /dev/null
+++ b/tmp-code/DeviceTypes.py
@@ -0,0 +1,55 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+
+class DeviceTypeEnum(Enum):
+
+    # Abstractions
+    NETWORK                         = 'network'
+
+    # Emulated device types
+    EMULATED_CLIENT                 = 'emu-client'
+    EMULATED_DATACENTER             = 'emu-datacenter'
+    EMULATED_IP_SDN_CONTROLLER      = 'emu-ip-sdn-controller'
+    EMULATED_MICROWAVE_RADIO_SYSTEM = 'emu-microwave-radio-system'
+    EMULATED_OPEN_LINE_SYSTEM       = 'emu-open-line-system'
+    EMULATED_OPTICAL_ROADM          = 'emu-optical-roadm'
+    EMULATED_OPTICAL_TRANSPONDER    = 'emu-optical-transponder'
+    EMULATED_OPTICAL_SPLITTER       = 'emu-optical-splitter'        # passive component required for XR Constellation
+    EMULATED_P4_SWITCH              = 'emu-p4-switch'
+    EMULATED_PACKET_RADIO_ROUTER    = 'emu-packet-radio-router'
+    EMULATED_PACKET_ROUTER          = 'emu-packet-router'
+    EMULATED_PACKET_SWITCH          = 'emu-packet-switch'
+    EMULATED_XR_CONSTELLATION       = 'emu-xr-constellation'
+    EMULATED_OPEN_FLOW_CONTROLLER   = 'emu-open-flow-controller'
+
+    # Real device types
+    CLIENT                          = 'client'
+    DATACENTER                      = 'datacenter'
+    IP_SDN_CONTROLLER               = 'ip-sdn-controller'
+    MICROWAVE_RADIO_SYSTEM          = 'microwave-radio-system'
+    OPEN_LINE_SYSTEM                = 'open-line-system'
+    OPTICAL_ROADM                   = 'optical-roadm'
+    OPTICAL_TRANSPONDER             = 'optical-transponder'
+    P4_SWITCH                       = 'p4-switch'
+    PACKET_RADIO_ROUTER             = 'packet-radio-router'
+    PACKET_ROUTER                   = 'packet-router'
+    PACKET_SWITCH                   = 'packet-switch'
+    XR_CONSTELLATION                = 'xr-constellation'
+    QKD_NODE                        = 'qkd-node'
+    OPENFLOW_RYU_CONTROLLER         = 'openflow-ryu-controller'
+
+    # ETSI TeraFlowSDN controller
+    TERAFLOWSDN_CONTROLLER          = 'teraflowsdn'
diff --git a/tmp-code/OpenFlow/OpenFlowDriver.py b/tmp-code/OpenFlow/OpenFlowDriver.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aee0cd298c836bbed98b14fbcec9d2b14d93541
--- /dev/null
+++ b/tmp-code/OpenFlow/OpenFlowDriver.py
@@ -0,0 +1,173 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import logging, requests, threading
+from requests.auth import HTTPBasicAuth
+from typing import Any, Iterator, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.type_checkers.Checkers import chk_string, chk_type
+from device.service.driver_api._Driver import _Driver
+from . import ALL_RESOURCE_KEYS
+from device.service.drivers.OpenFlow.Tools import find_key, get_switches, get_flows , add_flow , delete_flow , get_desc,get_port_desc, get_links_information,get_switches_information,del_flow_entry
+LOGGER = logging.getLogger(__name__)
+
+DRIVER_NAME = 'openflow_api'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
+
+class OpenFlowDriver(_Driver):
+    def __init__(self, address: str, port: int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
+        self.__lock = threading.Lock()
+        self.__started = threading.Event()
+        self.__terminate = threading.Event()
+        username = self.settings.get('username')
+        password = self.settings.get('password')
+        self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
+        scheme = self.settings.get('scheme', 'http')
+        self.__base_url = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port))
+        self.__timeout = int(self.settings.get('timeout', 120))
+
+    def Connect(self) -> bool:
+        url = f"{self.__base_url}/stats/switches"  # dpid-less probe endpoint; /stats/desc/1 assumed switch 1 exists
+        with self.__lock:
+            if self.__started.is_set():
+                return True
+            try:
+                response = requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth)
+                response.raise_for_status()
+            except requests.exceptions.Timeout:
+                LOGGER.exception(f"Timeout connecting to {self.__base_url}")
+                return False
+            except requests.exceptions.RequestException as e:
+                LOGGER.exception(f"Exception connecting to {self.__base_url}: {e}")
+                return False
+            else:
+                self.__started.set()
+                return True
+
+    def Disconnect(self) -> bool:
+        with self.__lock:
+            self.__terminate.set()
+            return True
+
+    #@metered_subclass_method(METRICS_POOL)
+    #def GetInitialConfig(self) -> List[Tuple[str, Any]]:
+    #    with self.__lock:
+    #        switches = get_switches(self.__base_url, auth=self.__auth, timeout=self.__timeout)
+    #        return [("switches", switches)]
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+        chk_type('resources', resource_keys, list)
+        results = []
+        with self.__lock:
+            for key in resource_keys:
+                try:
+                    if key.startswith('flows:'):
+                        dpid = key.split(':', 1)[1]
+                        flows = get_flows(self.__base_url, dpid, auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, flows))
+                    elif key.startswith('description:'):
+                        dpid = key.split(':', 1)[1]
+                        desc = get_desc(self.__base_url, dpid, auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, desc))
+                    elif key.startswith('switches'):
+                        switches = get_switches(self.__base_url, auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, switches))
+                    elif key.startswith('port_description:'):
+                        dpid = key.split(':', 1)[1]
+                        desc = get_port_desc(self.__base_url,dpid, auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, desc))
+                    elif key.startswith('switch_info'):
+                        sin = get_switches_information(self.__base_url, auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, sin))
+                    elif key.startswith('links_info'):
+                        lin = get_links_information(self.__base_url, auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, lin))
+                    else:
+                        results.append((key, None))  # If key not handled, append None
+                except Exception as e:
+                    results.append((key, e))
+        return results
+    
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+        chk_type('resources', resource_keys, list)
+        results = []
+        with self.__lock:
+            for item in resource_keys:
+                try:
+                    if isinstance(item, tuple):
+                        key, data = item
+                    else:
+                        key, data = item, None
+                    if key.startswith('flowentry_delete:'):
+                        dpid = key.split(':', 1)[1]
+                        flows = del_flow_entry(self.__base_url, dpid, auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, flows))
+                    elif key=='flow_data' and data:
+                        flow_del = delete_flow (self.__base_url,data,auth=self.__auth, timeout=self.__timeout)
+                        results.append((key, flow_del))
+                    else:
+                        results.append((key, None))  
+                except Exception as e:
+                    results.append((key, e))
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        results = []
+        if not resources:
+            return results       
+        with self.__lock:
+            for item in resources:
+                LOGGER.info('resources contains: %s', item)
+                try:
+                    if isinstance(item, tuple) and len(item) == 2:
+                        key, flow_data = item
+                    else:
+                        LOGGER.warning("Resource format invalid. Each item should be a tuple with (key, data).")
+                        results.append(False)
+                        continue
+                    if key == "flow_data" and isinstance(flow_data, dict):
+                        LOGGER.info(f"Found valid flow_data entry: {flow_data}")
+                        success = add_flow(self.__base_url, flow_data, auth=self.__auth, timeout=self.__timeout)
+                        results.append(success)
+                    else:
+                        LOGGER.warning(f"Skipping item with key: {key} due to invalid format or missing data.")
+                        results.append(False)
+
+                except Exception as e:
+                    LOGGER.error(f"Exception while setting configuration for item {item}: {str(e)}")
+                    results.append(e)
+
+        return results
+
+
+
+    @metered_subclass_method(METRICS_POOL)
+    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        # OpenFlow/Ryu driver does not support monitoring by now
+        return [False for _ in subscriptions]
+
+    @metered_subclass_method(METRICS_POOL)
+    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        # OpenFlow/Ryu driver does not support monitoring by now
+        return [False for _ in subscriptions]
+
+    def GetState(
+        self, blocking=False, terminate : Optional[threading.Event] = None
+    ) -> Iterator[Tuple[float, str, Any]]:
+        # OpenFlow/Ryu driver does not support monitoring by now
+        return []
diff --git a/tmp-code/OpenFlow/Tools.py b/tmp-code/OpenFlow/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..d683470876ea8c00fc26be9d6378dd46549b1c75
--- /dev/null
+++ b/tmp-code/OpenFlow/Tools.py
@@ -0,0 +1,174 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, operator, requests
+from requests.auth import HTTPBasicAuth
+from typing import Dict, List, Optional, Tuple, Union
+
+# NOTE(review): dropped unused driver_api RESOURCE_* import; it was shadowed by the local map below.
+
+LOGGER = logging.getLogger(__name__)
+
+RESOURCE_ENDPOINTS = {
+    #get configurations
+    "switches": "/stats/switches",
+    "description": "/stats/desc",
+    "flows": "/stats/flow",
+    "port_description":"/stats/portdesc",
+    "switch_info":"/v1.0/topology/switches",
+    "links_info":"/v1.0/topology/links",
+    #add flow
+    "flow_add": "/stats/flowentry/add",
+    #Delete all matching flow entries of the switch.
+    "flow_delete": "/stats/flowentry/delete",
+    "flowentry_delete":"/stats/flowentry/clear", #according to dpid
+
+}
+
+HTTP_OK_CODES = {
+    200,    # OK
+    201,    # Created
+    202,    # Accepted
+    204,    # No Content
+}
+
+# Utility function to find and extract a specific key from a resource.
+def find_key(resource: Tuple[str, str], key: str) -> Union[dict, str, None]:
+    try:
+        return json.loads(resource[1])[key]
+    except KeyError:
+        LOGGER.warning(f"Key '{key}' not found in resource.")
+        return None
+
+def get_switches(root_url: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['switches']}"
+    result = []
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        switches = response.json()
+        LOGGER.info(f"Successfully retrieved switches: {switches}")
+        result = switches
+    except requests.exceptions.Timeout:
+        LOGGER.exception(f"Timeout connecting to {url}")
+    except requests.exceptions.RequestException as e:
+        LOGGER.exception(f"Error retrieving switches: {str(e)}")
+    return result
+
+def get_switches_information(root_url: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['switch_info']}"
+    result = []
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        switches_info = response.json()
+        LOGGER.info(f"Successfully retrieved switch information: {switches_info}")
+        result = switches_info
+    except requests.exceptions.Timeout:
+        LOGGER.exception(f"Timeout connecting to {url}")
+    except requests.exceptions.RequestException as e:
+        LOGGER.exception(f"Error retrieving switch information: {str(e)}")
+    return result
+
+def get_links_information(root_url: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['links_info']}"
+    result = []
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        links_info = response.json()
+        LOGGER.info(f"Successfully retrieved link information: {links_info}")
+        result = links_info
+    except requests.exceptions.Timeout:
+        LOGGER.exception(f"Timeout connecting to {url}")
+    except requests.exceptions.RequestException as e:
+        LOGGER.exception(f"Error retrieving link information: {str(e)}")
+    return result
+
+def get_flows(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> List[Dict]:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flows']}/{dpid}"
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        flows = response.json()
+        LOGGER.info(f"Successfully retrieved flow rules for DPID {dpid}")
+        return flows
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to retrieve flow rules for DPID {dpid}: {str(e)}")
+        return []
+    
+#get description 
+def get_desc(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> Dict:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['description']}/{dpid}"
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        desc = response.json()
+        LOGGER.info(f"Successfully retrieved description for DPID {dpid}: {desc}")
+        return desc
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to retrieve description for DPID {dpid}: {str(e)}")
+        return {}
+    
+def get_port_desc(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> Dict:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['port_description']}/{dpid}"
+    try:
+        response = requests.get(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        port_desc = response.json()
+        LOGGER.info(f"Successfully retrieved port description for DPID {dpid}: {port_desc}")
+        return port_desc
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to retrieve port description for DPID {dpid}: {str(e)}")
+        return {}
+
+##according to dpid
+def del_flow_entry(root_url: str, dpid: str, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> Dict:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flowentry_delete']}/{dpid}"
+    try:
+        response = requests.delete(url, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        flow_desc = response.json()
+        LOGGER.info(f"Successfully cleared flow entries for DPID {dpid}: {flow_desc}")
+        return flow_desc
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to clear flow entries for DPID {dpid}: {str(e)}")
+        return {}
+
+#  to delete a flow based on match criteria.
+def delete_flow(root_url: str, flow_data: dict, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> bool:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flow_delete']}"
+    try:
+        response = requests.post(url, json=flow_data, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        LOGGER.info(f"Flow configuration deleted successfully for DPID {flow_data.get('dpid')}.")
+        return True
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to delete flow configuration for DPID {flow_data.get('dpid')}: {str(e)}")
+        return False
+    
+def add_flow(root_url: str, flow_data: dict, auth: Optional[HTTPBasicAuth] = None, timeout: Optional[int] = None) -> bool:
+    url = f"{root_url}{RESOURCE_ENDPOINTS['flow_add']}"
+    LOGGER.info(f"Posting flow data: {flow_data} (type: {type(flow_data)}) to URL: {url}") 
+    try:
+        response = requests.post(url, json=flow_data, timeout=timeout, verify=False, auth=auth)
+        response.raise_for_status()
+        LOGGER.info("Flow configuration added successfully.")
+        return True
+    except requests.exceptions.RequestException as e:
+        LOGGER.error(f"Failed to add flow configuration: {str(e)}")
+        return False
+
+
+
diff --git a/tmp-code/OpenFlow/__init__.py b/tmp-code/OpenFlow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f3d1a042c69720452803e994c38f7c2e966c684
--- /dev/null
+++ b/tmp-code/OpenFlow/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_SERVICES
+
+ALL_RESOURCE_KEYS = [
+    RESOURCE_ENDPOINTS,
+    RESOURCE_SERVICES,
+]
diff --git a/tmp-code/__init__.py b/tmp-code/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..487cf7d40aac66251f3657867207f4c4ac66514d
--- /dev/null
+++ b/tmp-code/__init__.py
@@ -0,0 +1,202 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import DeviceDriverEnum
+from device.Config import LOAD_ALL_DEVICE_DRIVERS
+from ..driver_api.FilterFields import FilterFieldEnum
+
+DRIVERS = []
+
+from .emulated.EmulatedDriver import EmulatedDriver # pylint: disable=wrong-import-position
+DRIVERS.append(
+    (EmulatedDriver, [
+        # TODO: multi-filter is not working
+        {
+            FilterFieldEnum.DEVICE_TYPE: [
+                DeviceTypeEnum.EMULATED_DATACENTER,
+                DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM,
+                DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM,
+                DeviceTypeEnum.EMULATED_OPTICAL_ROADM,
+                DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER,
+                DeviceTypeEnum.EMULATED_P4_SWITCH,
+                DeviceTypeEnum.EMULATED_PACKET_ROUTER,
+                DeviceTypeEnum.EMULATED_PACKET_SWITCH,
+
+                #DeviceTypeEnum.DATACENTER,
+                #DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM,
+                #DeviceTypeEnum.OPEN_LINE_SYSTEM,
+                #DeviceTypeEnum.OPTICAL_ROADM,
+                #DeviceTypeEnum.OPTICAL_TRANSPONDER,
+                #DeviceTypeEnum.P4_SWITCH,
+                #DeviceTypeEnum.PACKET_ROUTER,
+                #DeviceTypeEnum.PACKET_SWITCH,
+            ],
+            FilterFieldEnum.DRIVER: [
+                DeviceDriverEnum.DEVICEDRIVER_UNDEFINED,
+            ],
+        },
+        #{
+        #    # Emulated devices, all drivers => use Emulated
+        #    FilterFieldEnum.DEVICE_TYPE: [
+        #        DeviceTypeEnum.EMULATED_DATACENTER,
+        #        DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM,
+        #        DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM,
+        #        DeviceTypeEnum.EMULATED_OPTICAL_ROADM,
+        #        DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER,
+        #        DeviceTypeEnum.EMULATED_P4_SWITCH,
+        #        DeviceTypeEnum.EMULATED_PACKET_ROUTER,
+        #        DeviceTypeEnum.EMULATED_PACKET_SWITCH,
+        #    ],
+        #    FilterFieldEnum.DRIVER: [
+        #        DeviceDriverEnum.DEVICEDRIVER_UNDEFINED,
+        #        DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG,
+        #        DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
+        #        DeviceDriverEnum.DEVICEDRIVER_P4,
+        #        DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
+        #        DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532,
+        #        DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG,
+        #    ],
+        #}
+    ]))
+
+from .ietf_l2vpn.IetfL2VpnDriver import IetfL2VpnDriver # pylint: disable=wrong-import-position
+DRIVERS.append(
+    (IetfL2VpnDriver, [
+        {
+            FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER,
+            FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN,
+        }
+    ]))
+
+from .ietf_actn.IetfActnDriver import IetfActnDriver # pylint: disable=wrong-import-position
+DRIVERS.append(
+    (IetfActnDriver, [
+        {
+            FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM,
+            FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN,
+        }
+    ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .openconfig.OpenConfigDriver import OpenConfigDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (OpenConfigDriver, [
+            {
+                # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG,
+            }
+        ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (GnmiOpenConfigDriver, [
+            {
+                # Real Packet Router, specifying gNMI OpenConfig Driver => use GnmiOpenConfigDriver
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG,
+            }
+        ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .transport_api.TransportApiDriver import TransportApiDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (TransportApiDriver, [
+            {
+                # Real OLS, specifying TAPI Driver => use TransportApiDriver
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
+            }
+        ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .p4.p4_driver import P4Driver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (P4Driver, [
+            {
+                # Real P4 Switch, specifying P4 Driver => use P4Driver
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_P4,
+            }
+        ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .microwave.IETFApiDriver import IETFApiDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (IETFApiDriver, [
+            {
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
+            }
+        ]))
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .OpenFlow.OpenFlowDriver import OpenFlowDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (OpenFlowDriver, [
+            {
+                # Specify the device type and driver that should use OpenFlowDriver
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPENFLOW_RYU_CONTROLLER ,
+                FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_RYU,
+            }
+        ])
+    )
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .xr.XrDriver import XrDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (XrDriver, [
+            {
+                # Close enough, it does optical switching
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.XR_CONSTELLATION,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_XR,
+            }
+        ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .optical_tfs.OpticalTfsDriver import OpticalTfsDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (OpticalTfsDriver, [
+            {
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM,
+                FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS,
+            }
+        ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .oc_driver.OCDriver import OCDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (OCDriver, [
+            {
+                # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver
+                FilterFieldEnum.DEVICE_TYPE: [
+                    DeviceTypeEnum.OPTICAL_ROADM,
+                    DeviceTypeEnum.OPTICAL_TRANSPONDER
+                ],
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_OC,
+            }
+        ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .qkd.QKDDriver2 import QKDDriver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (QKDDriver, [
+            {
+                # Close enough, it does optical switching
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.QKD_NODE,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_QKD,
+            }
+        ]))
diff --git a/tmp-code/context.proto b/tmp-code/context.proto
new file mode 100644
index 0000000000000000000000000000000000000000..2ab6f0aea615efcb2ab24bbbf6938f5c84a95a45
--- /dev/null
+++ b/tmp-code/context.proto
@@ -0,0 +1,698 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package context;
+
+import "acl.proto";
+import "kpi_sample_types.proto";
+
+service ContextService {
+  rpc ListContextIds     (Empty         ) returns (       ContextIdList   ) {}
+  rpc ListContexts       (Empty         ) returns (       ContextList     ) {}
+  rpc GetContext         (ContextId     ) returns (       Context         ) {}
+  rpc SetContext         (Context       ) returns (       ContextId       ) {}
+  rpc RemoveContext      (ContextId     ) returns (       Empty           ) {}
+  rpc GetContextEvents   (Empty         ) returns (stream ContextEvent    ) {}
+
+  rpc ListTopologyIds    (ContextId     ) returns (       TopologyIdList  ) {}
+  rpc ListTopologies     (ContextId     ) returns (       TopologyList    ) {}
+  rpc GetTopology        (TopologyId    ) returns (       Topology        ) {}
+  rpc GetTopologyDetails (TopologyId    ) returns (       TopologyDetails ) {}
+  rpc SetTopology        (Topology      ) returns (       TopologyId      ) {}
+  rpc RemoveTopology     (TopologyId    ) returns (       Empty           ) {}
+  rpc GetTopologyEvents  (Empty         ) returns (stream TopologyEvent   ) {}
+
+  rpc ListDeviceIds      (Empty         ) returns (       DeviceIdList    ) {}
+  rpc ListDevices        (Empty         ) returns (       DeviceList      ) {}
+  rpc GetDevice          (DeviceId      ) returns (       Device          ) {}
+  rpc SetDevice          (Device        ) returns (       DeviceId        ) {}
+  rpc RemoveDevice       (DeviceId      ) returns (       Empty           ) {}
+  rpc GetDeviceEvents    (Empty         ) returns (stream DeviceEvent     ) {}
+  rpc SelectDevice       (DeviceFilter  ) returns (       DeviceList      ) {}
+  rpc ListEndPointNames  (EndPointIdList) returns (       EndPointNameList) {}
+
+  rpc ListLinkIds        (Empty         ) returns (       LinkIdList      ) {}
+  rpc ListLinks          (Empty         ) returns (       LinkList        ) {}
+  rpc GetLink            (LinkId        ) returns (       Link            ) {}
+  rpc SetLink            (Link          ) returns (       LinkId          ) {}
+  rpc RemoveLink         (LinkId        ) returns (       Empty           ) {}
+  rpc GetLinkEvents      (Empty         ) returns (stream LinkEvent       ) {}
+
+  rpc ListServiceIds     (ContextId     ) returns (       ServiceIdList   ) {}
+  rpc ListServices       (ContextId     ) returns (       ServiceList     ) {}
+  rpc GetService         (ServiceId     ) returns (       Service         ) {}
+  rpc SetService         (Service       ) returns (       ServiceId       ) {}
+  rpc UnsetService       (Service       ) returns (       ServiceId       ) {}
+  rpc RemoveService      (ServiceId     ) returns (       Empty           ) {}
+  rpc GetServiceEvents   (Empty         ) returns (stream ServiceEvent    ) {}
+  rpc SelectService      (ServiceFilter ) returns (       ServiceList     ) {}
+
+  rpc ListSliceIds       (ContextId     ) returns (       SliceIdList     ) {}
+  rpc ListSlices         (ContextId     ) returns (       SliceList       ) {}
+  rpc GetSlice           (SliceId       ) returns (       Slice           ) {}
+  rpc SetSlice           (Slice         ) returns (       SliceId         ) {}
+  rpc UnsetSlice         (Slice         ) returns (       SliceId         ) {}
+  rpc RemoveSlice        (SliceId       ) returns (       Empty           ) {}
+  rpc GetSliceEvents     (Empty         ) returns (stream SliceEvent      ) {}
+  rpc SelectSlice        (SliceFilter   ) returns (       SliceList       ) {}
+
+  rpc ListConnectionIds  (ServiceId     ) returns (       ConnectionIdList) {}
+  rpc ListConnections    (ServiceId     ) returns (       ConnectionList  ) {}
+  rpc GetConnection      (ConnectionId  ) returns (       Connection      ) {}
+  rpc SetConnection      (Connection    ) returns (       ConnectionId    ) {}
+  rpc RemoveConnection   (ConnectionId  ) returns (       Empty           ) {}
+  rpc GetConnectionEvents(Empty         ) returns (stream ConnectionEvent ) {}
+
+
+  // ------------------------------ Experimental -----------------------------
+  rpc GetOpticalConfig   (Empty          ) returns (OpticalConfigList     ) {}
+  rpc SetOpticalConfig   (OpticalConfig  ) returns (OpticalConfigId       ) {}
+  rpc SelectOpticalConfig(OpticalConfigId) returns (OpticalConfig         ) {}
+
+  rpc SetOpticalLink     (OpticalLink    ) returns (Empty                 ) {}
+  rpc GetOpticalLink     (OpticalLinkId  ) returns (OpticalLink           ) {}
+  rpc GetFiber           (FiberId        ) returns (Fiber                 ) {}
+}
+
+// ----- Generic -------------------------------------------------------------------------------------------------------
+message Empty {}
+
+message Uuid {
+  string uuid = 1;
+}
+
+enum EventTypeEnum {
+  EVENTTYPE_UNDEFINED = 0;
+  EVENTTYPE_CREATE = 1;
+  EVENTTYPE_UPDATE = 2;
+  EVENTTYPE_REMOVE = 3;
+}
+
+message Timestamp {
+  double timestamp = 1;
+}
+
+message Event {
+  Timestamp timestamp = 1;
+  EventTypeEnum event_type = 2;
+}
+
+// ----- Context -------------------------------------------------------------------------------------------------------
+message ContextId {
+  Uuid context_uuid = 1;
+}
+
+message Context {
+  ContextId context_id = 1;
+  string name = 2;
+  repeated TopologyId topology_ids = 3;
+  repeated ServiceId service_ids = 4;
+  repeated SliceId slice_ids = 5;
+  TeraFlowController controller = 6;
+}
+
+message ContextIdList {
+  repeated ContextId context_ids = 1;
+}
+
+message ContextList {
+  repeated Context contexts = 1;
+}
+
+message ContextEvent {
+  Event event = 1;
+  ContextId context_id = 2;
+}
+
+
+// ----- Topology ------------------------------------------------------------------------------------------------------
+message TopologyId {
+  ContextId context_id = 1;
+  Uuid topology_uuid = 2;
+}
+
+message Topology {
+  TopologyId topology_id = 1;
+  string name = 2;
+  repeated DeviceId device_ids = 3;
+  repeated LinkId link_ids = 4;
+}
+
+message TopologyDetails {
+  TopologyId topology_id = 1;
+  string name = 2;
+  repeated Device devices = 3;
+  repeated Link links = 4;
+}
+
+message TopologyIdList {
+  repeated TopologyId topology_ids = 1;
+}
+
+message TopologyList {
+  repeated Topology topologies = 1;
+}
+
+message TopologyEvent {
+  Event event = 1;
+  TopologyId topology_id = 2;
+}
+
+
+// ----- Device --------------------------------------------------------------------------------------------------------
+message DeviceId {
+  Uuid device_uuid = 1;
+}
+
+message Device {
+  DeviceId device_id = 1;
+  string name = 2;
+  string device_type = 3;
+  DeviceConfig device_config = 4;
+  DeviceOperationalStatusEnum device_operational_status = 5;
+  repeated DeviceDriverEnum device_drivers = 6;
+  repeated EndPoint device_endpoints = 7;
+  repeated Component components = 8; // Used for inventory
+  DeviceId controller_id = 9; // Identifier of node controlling the actual device
+}
+
+message Component {                         // Physical sub-component of a device; used for inventory
+  Uuid component_uuid   = 1;
+  string name           = 2;
+  string type           = 3;
+  
+  map<string, string> attributes = 4; // dict[attr.name => json.dumps(attr.value)]
+  string parent         = 5;
+}
+
+message DeviceConfig {
+  repeated ConfigRule config_rules = 1;
+}
+
+enum DeviceDriverEnum {
+  DEVICEDRIVER_UNDEFINED = 0; // also used for emulated
+  DEVICEDRIVER_OPENCONFIG = 1;
+  DEVICEDRIVER_TRANSPORT_API = 2;
+  DEVICEDRIVER_P4 = 3;
+  DEVICEDRIVER_IETF_NETWORK_TOPOLOGY = 4;
+  DEVICEDRIVER_ONF_TR_532 = 5;
+  DEVICEDRIVER_XR = 6;
+  DEVICEDRIVER_IETF_L2VPN = 7;
+  DEVICEDRIVER_GNMI_OPENCONFIG = 8;
+  DEVICEDRIVER_OPTICAL_TFS = 9;
+  DEVICEDRIVER_IETF_ACTN = 10;
+  DEVICEDRIVER_OC = 11;
+  DEVICEDRIVER_QKD = 12;
+  DEVICEDRIVER_RYU = 13;
+}
+
+enum DeviceOperationalStatusEnum {
+  DEVICEOPERATIONALSTATUS_UNDEFINED = 0;
+  DEVICEOPERATIONALSTATUS_DISABLED = 1;
+  DEVICEOPERATIONALSTATUS_ENABLED = 2;
+}
+
+message DeviceIdList {
+  repeated DeviceId device_ids = 1;
+}
+
+message DeviceList {
+  repeated Device devices = 1;
+}
+
+message DeviceFilter {
+  DeviceIdList device_ids = 1;
+  bool include_endpoints = 2;
+  bool include_config_rules = 3;
+  bool include_components = 4;
+}
+
+message DeviceEvent {
+  Event event = 1;
+  DeviceId device_id = 2;
+  DeviceConfig device_config = 3;
+}
+
+
+// ----- Link ----------------------------------------------------------------------------------------------------------
+message LinkId {
+  Uuid link_uuid = 1;
+}
+
+message LinkAttributes {
+  float total_capacity_gbps = 1;
+  float used_capacity_gbps  = 2;
+}
+
+message Link {
+  LinkId link_id = 1;
+  string name = 2;
+  repeated EndPointId link_endpoint_ids = 3;
+  LinkAttributes attributes = 4;
+  LinkTypeEnum link_type = 5;
+}
+
+message LinkIdList {
+  repeated LinkId link_ids = 1;
+}
+
+message LinkList {
+  repeated Link links = 1;
+}
+
+message LinkEvent {
+  Event event = 1;
+  LinkId link_id = 2;
+}
+
+enum LinkTypeEnum {
+  LINKTYPE_UNKNOWN = 0;
+  LINKTYPE_COPPER = 1;
+  LINKTYPE_VIRTUAL_COPPER = 2;
+  LINKTYPE_OPTICAL = 3;
+  LINKTYPE_VIRTUAL_OPTICAL = 4;
+}
+
+// ----- Service -------------------------------------------------------------------------------------------------------
+message ServiceId {
+  ContextId context_id = 1;
+  Uuid service_uuid = 2;
+}
+
+message Service {
+  ServiceId service_id = 1;
+  string name = 2;
+  ServiceTypeEnum service_type = 3;
+  repeated EndPointId service_endpoint_ids = 4;
+  repeated Constraint service_constraints = 5;
+  ServiceStatus service_status = 6;
+  ServiceConfig service_config = 7;
+  Timestamp timestamp = 8;
+}
+
+enum ServiceTypeEnum {
+  SERVICETYPE_UNKNOWN = 0;
+  SERVICETYPE_L3NM = 1;
+  SERVICETYPE_L2NM = 2;
+  SERVICETYPE_TAPI_CONNECTIVITY_SERVICE = 3;
+  SERVICETYPE_TE = 4;
+  SERVICETYPE_E2E = 5;
+  SERVICETYPE_OPTICAL_CONNECTIVITY = 6;
+  SERVICETYPE_QKD = 7;
+}
+
+enum ServiceStatusEnum {
+  SERVICESTATUS_UNDEFINED = 0;
+  SERVICESTATUS_PLANNED = 1;
+  SERVICESTATUS_ACTIVE = 2;
+  SERVICESTATUS_UPDATING = 3;
+  SERVICESTATUS_PENDING_REMOVAL = 4;
+  SERVICESTATUS_SLA_VIOLATED = 5;
+}
+
+message ServiceStatus {
+  ServiceStatusEnum service_status = 1;
+}
+
+message ServiceConfig {
+  repeated ConfigRule config_rules = 1;
+}
+
+message ServiceIdList {
+  repeated ServiceId service_ids = 1;
+}
+
+message ServiceList {
+  repeated Service services = 1;
+}
+
+message ServiceFilter {
+  ServiceIdList service_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_config_rules = 4;
+}
+
+message ServiceEvent {
+  Event event = 1;
+  ServiceId service_id = 2;
+}
+
+// ----- Slice ---------------------------------------------------------------------------------------------------------
+message SliceId {
+  ContextId context_id = 1;
+  Uuid slice_uuid = 2;
+}
+
+message Slice {
+  SliceId slice_id = 1;
+  string name = 2;
+  repeated EndPointId slice_endpoint_ids = 3;
+  repeated Constraint slice_constraints = 4;
+  repeated ServiceId slice_service_ids = 5;
+  repeated SliceId slice_subslice_ids = 6;
+  SliceStatus slice_status = 7;
+  SliceConfig slice_config = 8;
+  SliceOwner slice_owner = 9;
+  Timestamp timestamp = 10;
+}
+
+message SliceOwner {
+  Uuid owner_uuid = 1;
+  string owner_string = 2;
+}
+
+enum SliceStatusEnum {
+  SLICESTATUS_UNDEFINED    = 0;
+  SLICESTATUS_PLANNED      = 1;
+  SLICESTATUS_INIT         = 2;
+  SLICESTATUS_ACTIVE       = 3;
+  SLICESTATUS_DEINIT       = 4;
+  SLICESTATUS_SLA_VIOLATED = 5;
+}
+
+message SliceStatus {
+  SliceStatusEnum slice_status = 1;
+}
+
+message SliceConfig {
+  repeated ConfigRule config_rules = 1;
+}
+
+message SliceIdList {
+  repeated SliceId slice_ids = 1;
+}
+
+message SliceList {
+  repeated Slice slices = 1;
+}
+
+message SliceFilter {
+  SliceIdList slice_ids = 1;
+  bool include_endpoint_ids = 2;
+  bool include_constraints = 3;
+  bool include_service_ids = 4;
+  bool include_subslice_ids = 5;
+  bool include_config_rules = 6;
+}
+
+message SliceEvent {
+  Event event = 1;
+  SliceId slice_id = 2;
+}
+
+// ----- Connection ----------------------------------------------------------------------------------------------------
+message ConnectionId {
+  Uuid connection_uuid = 1;
+}
+
+message ConnectionSettings_L0 {
+  string lsp_symbolic_name = 1;
+}
+
+message ConnectionSettings_L2 {
+  string src_mac_address = 1;
+  string dst_mac_address = 2;
+  uint32 ether_type = 3;
+  uint32 vlan_id = 4;
+  uint32 mpls_label = 5;
+  uint32 mpls_traffic_class = 6;
+}
+
+message ConnectionSettings_L3 {
+  string src_ip_address = 1;
+  string dst_ip_address = 2;
+  uint32 dscp = 3;
+  uint32 protocol = 4;
+  uint32 ttl = 5;
+}
+
+message ConnectionSettings_L4 {
+  uint32 src_port = 1;
+  uint32 dst_port = 2;
+  uint32 tcp_flags = 3;
+  uint32 ttl = 4;
+}
+
+message ConnectionSettings {
+  ConnectionSettings_L0 l0 = 1;
+  ConnectionSettings_L2 l2 = 2;
+  ConnectionSettings_L3 l3 = 3;
+  ConnectionSettings_L4 l4 = 4;
+}
+
+message Connection {
+  ConnectionId connection_id = 1;
+  ServiceId service_id = 2;
+  repeated EndPointId path_hops_endpoint_ids = 3;
+  repeated ServiceId sub_service_ids = 4;
+  ConnectionSettings settings = 5;
+}
+
+message ConnectionIdList {
+  repeated ConnectionId connection_ids = 1;
+}
+
+message ConnectionList {
+  repeated Connection connections = 1;
+}
+
+message ConnectionEvent {
+  Event event = 1;
+  ConnectionId connection_id = 2;
+}
+
+
+// ----- Endpoint ------------------------------------------------------------------------------------------------------
+message EndPointId {
+  TopologyId topology_id = 1;
+  DeviceId device_id = 2;
+  Uuid endpoint_uuid = 3;
+}
+
+message EndPoint {
+  EndPointId endpoint_id = 1;
+  string name = 2;
+  string endpoint_type = 3;
+  repeated kpi_sample_types.KpiSampleType kpi_sample_types = 4;
+  Location endpoint_location = 5;
+}
+
+message EndPointName {
+  EndPointId endpoint_id = 1;
+  string device_name = 2;
+  string endpoint_name = 3;
+  string endpoint_type = 4;
+}
+
+message EndPointIdList {
+  repeated EndPointId endpoint_ids = 1;
+}
+
+message EndPointNameList {
+  repeated EndPointName endpoint_names = 1;
+}
+
+
+// ----- Configuration -------------------------------------------------------------------------------------------------
+enum ConfigActionEnum {
+  CONFIGACTION_UNDEFINED = 0;
+  CONFIGACTION_SET       = 1;
+  CONFIGACTION_DELETE    = 2;
+}
+
+message ConfigRule_Custom {
+  string resource_key = 1;
+  string resource_value = 2;
+}
+
+message ConfigRule_ACL {
+  EndPointId endpoint_id = 1;
+  acl.AclRuleSet rule_set = 2;
+}
+
+message ConfigRule {
+  ConfigActionEnum action = 1;
+  oneof config_rule {
+    ConfigRule_Custom custom = 2;
+    ConfigRule_ACL acl = 3;
+  }
+}
+
+
+// ----- Constraint ----------------------------------------------------------------------------------------------------
+enum ConstraintActionEnum {
+  CONSTRAINTACTION_UNDEFINED = 0;
+  CONSTRAINTACTION_SET       = 1;
+  CONSTRAINTACTION_DELETE    = 2;
+}
+
+message Constraint_Custom {
+  string constraint_type = 1;
+  string constraint_value = 2;
+}
+
+message Constraint_Schedule {
+  double start_timestamp = 1;
+  float duration_days = 2;
+}
+
+message GPS_Position {
+  float latitude = 1;
+  float longitude = 2;
+}
+
+message Location {
+  oneof location {
+    string region = 1;
+    GPS_Position gps_position = 2;
+  }
+}
+
+message Constraint_EndPointLocation {
+  EndPointId endpoint_id = 1;
+  Location location = 2;
+}
+
+message Constraint_EndPointPriority {
+  EndPointId endpoint_id = 1;
+  uint32 priority = 2;
+}
+
+message Constraint_SLA_Latency {
+  float e2e_latency_ms = 1;
+}
+
+message Constraint_SLA_Capacity {
+  float capacity_gbps = 1;
+}
+
+message Constraint_SLA_Availability {
+  uint32 num_disjoint_paths = 1;
+  bool all_active = 2;
+  float availability = 3; // 0.0 .. 100.0 percentage of availability
+}
+
+enum IsolationLevelEnum {
+  NO_ISOLATION = 0;
+  PHYSICAL_ISOLATION = 1;
+  LOGICAL_ISOLATION = 2;
+  PROCESS_ISOLATION = 3;
+  PHYSICAL_MEMORY_ISOLATION = 4;
+  PHYSICAL_NETWORK_ISOLATION = 5;
+  VIRTUAL_RESOURCE_ISOLATION = 6;
+  NETWORK_FUNCTIONS_ISOLATION = 7;
+  SERVICE_ISOLATION = 8;
+}
+
+message Constraint_SLA_Isolation_level {
+  repeated IsolationLevelEnum isolation_level = 1;
+}
+
+message Constraint_Exclusions {
+  bool is_permanent = 1;
+  repeated DeviceId device_ids = 2;
+  repeated EndPointId endpoint_ids = 3;
+  repeated LinkId link_ids = 4;
+}
+
+
+message QoSProfileId {
+  context.Uuid qos_profile_id = 1;
+}
+
+message Constraint_QoSProfile {
+  QoSProfileId qos_profile_id = 1;
+  string qos_profile_name = 2;
+}
+
+message Constraint {
+  ConstraintActionEnum action = 1;
+  oneof constraint {
+    Constraint_Custom custom = 2;
+    Constraint_Schedule schedule = 3;
+    Constraint_EndPointLocation endpoint_location = 4;
+    Constraint_EndPointPriority endpoint_priority = 5;
+    Constraint_SLA_Capacity sla_capacity = 6;
+    Constraint_SLA_Latency sla_latency = 7;
+    Constraint_SLA_Availability sla_availability = 8;
+    Constraint_SLA_Isolation_level sla_isolation = 9;
+    Constraint_Exclusions exclusions = 10;
+    Constraint_QoSProfile qos_profile = 11;
+  }
+}
+
+
+// ----- Miscellaneous -------------------------------------------------------------------------------------------------
+message TeraFlowController {
+  ContextId context_id = 1;
+  string ip_address = 2;
+  uint32 port = 3;
+}
+
+message AuthenticationResult {
+  ContextId context_id = 1;
+  bool authenticated = 2;
+}
+
+// ---------------- Experimental ------------------------
+message OpticalConfigId {
+  string opticalconfig_uuid = 1;
+}
+message OpticalConfig {
+  OpticalConfigId opticalconfig_id = 1;
+  string config = 2;
+}
+
+message OpticalConfigList {
+  repeated OpticalConfig opticalconfigs = 1;
+}
+
+// ---- Optical Link ----
+
+message OpticalLinkId {
+  Uuid optical_link_uuid = 1;
+}
+
+message FiberId {
+  Uuid fiber_uuid = 1;
+}
+
+message Fiber {
+  string ID = 10;
+  string src_port = 1;
+  string dst_port = 2;
+  string local_peer_port =  3;
+  string remote_peer_port = 4;
+  repeated int32 c_slots = 5;
+  repeated int32 l_slots = 6;
+  repeated int32 s_slots = 7;
+  float length = 8;
+  bool used = 9;
+  FiberId fiber_uuid = 11;
+
+}
+message OpticalLinkDetails {
+  float length = 1;
+  string source = 2;
+  string target = 3;
+  repeated Fiber fibers = 4;
+}
+
+message OpticalLink {
+  string name = 1;
+  OpticalLinkDetails details = 2;
+  OpticalLinkId optical_link_uuid = 3;
+}
diff --git a/tmp-code/run_openflow.sh b/tmp-code/run_openflow.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2c525ca70242374ebe7c09993833cee867455167
--- /dev/null
+++ b/tmp-code/run_openflow.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Run the OpenFlow driver unit tests and collect code coverage.
+PROJECTDIR=$(pwd)
+RCFILE="$PROJECTDIR/coverage/.coveragerc"
+# Abort if the source tree is not where we expect it, instead of running pytest from the wrong dir.
+cd "$PROJECTDIR/src" || exit 1
+coverage run --rcfile="$RCFILE" --append -m pytest --log-level=DEBUG --verbose \
+    device/tests/test_OpenFlow.py
\ No newline at end of file
diff --git a/tmp-code/test_OpenFlow.py b/tmp-code/test_OpenFlow.py
new file mode 100644
index 0000000000000000000000000000000000000000..60ee4542c768a50747be95e490254cb027eba6e2
--- /dev/null
+++ b/tmp-code/test_OpenFlow.py
@@ -0,0 +1,77 @@
+# NOTE(review): json, `from re import A`, resource and time appear unused below — candidates for removal.
+import json
+from re import A
+import resource
+import logging, os, sys, time
+#from typing import Dict, Self, Tuple
+# Presumably forces emulated mode; must be set before the driver import reads it — TODO confirm.
+os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
+from device.service.drivers.OpenFlow.OpenFlowDriver import OpenFlowDriver
+logging.basicConfig(level=logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+def test_main():
+    driver_settings = {
+        'protocol': 'http', 
+        'username': None,  
+        'password': None,  
+        'use_tls': False, 
+    }
+    driver = OpenFlowDriver('127.0.0.1', 8080 , **driver_settings)
+    driver.Connect()
+  
+    
+    # Test: GetConfig
+    #resource_keys = [ 'flows:1','description:1','switches','port_description:1','switch_info','links_info']
+  #  config = driver.GetConfig(resource_keys )
+  #  LOGGER.info('Specific configuration: %s', config)
+    
+    #resource_delete=["flowentry_delete:1"]
+    #config = driver.DeleteConfig(resource_delete)
+    #LOGGER.info('Specific configuration: %s', config)
+    #a=driver.GetConfig(["flows:1"])
+    #LOGGER.info('flow 1 = {:s}'.format(str(a)))
+#    delete_data = {
+#    "dpid": 2,
+#    "cookie": 1,
+#    "cookie_mask": 1,
+#    "table_id": 0,
+#    "idle_timeout": 30,
+#    "hard_timeout": 30,
+#    "priority": 11111,
+#    "flags": 1,
+#    "match":{
+#        "in_port":2
+#    },
+#    "actions":[
+#        {
+#            "type":"ddf",
+#            "port": 1
+#        }
+#    ]
+# }
+#    delete_result = driver.DeleteConfig([("flow_data", delete_data)])
+#    LOGGER.info('resources_to_delete = {:s}'.format(str(delete_result)))
+#    a=driver.GetConfig(["flows:1"])
+#    LOGGER.info('flow 2 = {:s}'.format(str(a)))
+    flow_data = { 
+        "dpid": 2,
+        "priority": 22224,
+        "match": {
+            "in_port": 1
+        },
+        "actions": [
+            {
+                "type": "GOTO_TABLE",
+                "table_id": 1
+            }
+        ]
+    }
+    set_result = driver.SetConfig([('flow_data',flow_data)])
+    LOGGER.info(set_result)
+    driver.Disconnect()
+    
+    raise Exception ()
+
+if __name__ == '__main__':
+    # Allow running directly; test_main() returns None, so the process exits with code 0 on success.
+    sys.exit(test_main())