diff --git a/hackfest/tfs-descriptors/device-all.json b/hackfest/tfs-descriptors/device-all.json
index 8cb8e031488f0dd1fa4176b8d20d01fe2d24abc9..36a93fe98d253dbc5e6db7f91b3b890e529c2ffc 100644
--- a/hackfest/tfs-descriptors/device-all.json
+++ b/hackfest/tfs-descriptors/device-all.json
@@ -9,7 +9,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false,
             "hostkey_verify": false, "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
@@ -26,7 +26,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false,
             "hostkey_verify": false, "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
@@ -43,7 +43,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false,
             "hostkey_verify": false, "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
@@ -60,7 +60,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false,
             "hostkey_verify": false, "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 120}
         }}}
     ]},
diff --git a/hackfest/tfs-descriptors/device-netconf-openconfig.json b/hackfest/tfs-descriptors/device-netconf-openconfig.json
index 7e01f037e744493a8cd1190b2510ed3d4d1c86aa..490e36efde5b428781b945ccc5060eb7b29a558a 100644
--- a/hackfest/tfs-descriptors/device-netconf-openconfig.json
+++ b/hackfest/tfs-descriptors/device-netconf-openconfig.json
@@ -9,7 +9,7 @@
         {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
             "username": "admin", "password": "admin", "force_running": false,
             "hostkey_verify": false, "look_for_keys": false,
-            "allow_agent": false, "delete_rule": true, "device_params": {"name": "default"},
+            "allow_agent": false, "commit_per_rule": true, "device_params": {"name": "default"},
             "manager_params": {"timeout" : 15}
         }}}
     ]},
diff --git a/hackfest/tfs-descriptors/old/device.json b/hackfest/tfs-descriptors/old/device.json
index 03736314dee9ea0a8aae27627361dcdd24457fca..abe529e729955b8048c00fd688ce1d1a8b5a0285 100644
--- a/hackfest/tfs-descriptors/old/device.json
+++ b/hackfest/tfs-descriptors/old/device.json
@@ -27,7 +27,7 @@
         "device_config": {"config_rules": [
             {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.15"}},
             {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8301"}},
-            {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"delete_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
+            {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"commit_per_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
         ]},
         "device_operational_status": 1,
         "device_drivers": [1],
@@ -39,7 +39,7 @@
         "device_config": {"config_rules": [
             {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.15"}},
             {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8302"}},
-            {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"delete_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
+            {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"commit_per_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
         ]},
         "device_operational_status": 1,
         "device_drivers": [1],
@@ -51,7 +51,7 @@
         "device_config": {"config_rules": [
             {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.15"}},
             {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8303"}},
-            {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"delete_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
+            {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"admin\", \"password\": \"admin\", \"force_running\": true, \"hostkey_verify\": false, \"look_for_keys\": false, \"allow_agent\": true, \"commit_per_rule\": false, \"device_params\" : {\"name\": \"default\"}, \"manager_params\" : {\"timeout\": 15}}"}}
         ]},
         "device_operational_status": 1,
         "device_drivers": [1],
diff --git a/src/common/method_wrappers/tests/deploy_specs.sh b/src/common/method_wrappers/tests/deploy_specs.sh
index 571990ecabfbf120b517f44fd99b4550a4b8a9a1..1f41d2348e5a2d60c816071ef3414df281caeaaa 100755
--- a/src/common/method_wrappers/tests/deploy_specs.sh
+++ b/src/common/method_wrappers/tests/deploy_specs.sh
@@ -1,10 +1,11 @@
+#!/bin/bash
 # Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-# http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,21 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-# Supported components are:
-#     context device automation policy service compute monitoring webui
-#     interdomain slice pathcomp dlt
-#     dbscanserving opticalattackmitigator opticalattackdetector
-#     l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
-export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute dlt
+export TFS_COMPONENTS="context device pathcomp service slice webui load_generator"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
@@ -35,6 +34,63 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicem
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
-export TFS_SKIP_BUILD="NO" #${TFS_SKIP_BUILD:-"YES"}
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS=""
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Disable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST=""
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index ef3d0728d5ed02ea4a15ba0c3ccd6f1428cab7df..ac03527529b603089c4f8233cb185f6427e0c360 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -38,8 +38,6 @@ logging.getLogger('apscheduler.executors.default').setLevel(logging.INFO if DEBU
 logging.getLogger('apscheduler.scheduler').setLevel(logging.INFO if DEBUG_MODE else logging.ERROR)
 logging.getLogger('monitoring-client').setLevel(logging.INFO if DEBUG_MODE else logging.ERROR)
 
-LOGGER = logging.getLogger(__name__)
-
 RE_GET_ENDPOINT_FROM_INTERFACE_KEY   = re.compile(r'.*interface\[([^\]]+)\].*')
 RE_GET_ENDPOINT_FROM_INTERFACE_XPATH = re.compile(r".*interface\[oci\:name\='([^\]]+)'\].*")
 
@@ -60,18 +58,18 @@ class NetconfSessionHandler:
         self.__connected = threading.Event()
         self.__address = address
         self.__port = int(port)
-        self.__username          = settings.get('username')
-        self.__password          = settings.get('password')
-        self.__vendor            = settings.get('vendor')
-        self.__key_filename      = settings.get('key_filename')
-        self.__hostkey_verify    = settings.get('hostkey_verify', True)
-        self.__look_for_keys     = settings.get('look_for_keys', True)
-        self.__allow_agent       = settings.get('allow_agent', True)
-        self.__force_running     = settings.get('force_running', False)
-        self.__commit_per_delete = settings.get('delete_rule', False)
-        self.__device_params     = settings.get('device_params', {})
-        self.__manager_params    = settings.get('manager_params', {})
-        self.__nc_params         = settings.get('nc_params', {})
+        self.__username        = settings.get('username')
+        self.__password        = settings.get('password')
+        self.__vendor          = settings.get('vendor')
+        self.__key_filename    = settings.get('key_filename')
+        self.__hostkey_verify  = settings.get('hostkey_verify', True)
+        self.__look_for_keys   = settings.get('look_for_keys', True)
+        self.__allow_agent     = settings.get('allow_agent', True)
+        self.__force_running   = settings.get('force_running', False)
+        self.__commit_per_rule = settings.get('commit_per_rule', False)
+        self.__device_params   = settings.get('device_params', {})
+        self.__manager_params  = settings.get('manager_params', {})
+        self.__nc_params       = settings.get('nc_params', {})
         self.__manager : Manager = None
         self.__candidate_supported = False
 
@@ -94,7 +92,7 @@
     def use_candidate(self): return self.__candidate_supported and not self.__force_running
 
     @property
-    def commit_per_rule(self): return self.__commit_per_delete
+    def commit_per_rule(self): return self.__commit_per_rule
 
     @property
     def vendor(self): return self.__vendor
@@ -141,8 +139,9 @@ def compute_delta_sample(previous_sample, previous_timestamp, current_sample, cu
     return delta_sample
 
 class SamplesCache:
-    def __init__(self, netconf_handler : NetconfSessionHandler) -> None:
+    def __init__(self, netconf_handler : NetconfSessionHandler, logger : logging.Logger) -> None:
         self.__netconf_handler = netconf_handler
+        self.__logger = logger
         self.__lock = threading.Lock()
         self.__timestamp = None
         self.__absolute_samples = {}
@@ -166,7 +165,7 @@
                     self.__absolute_samples[interface] = samples
                 self.__timestamp = now
             except: # pylint: disable=bare-except
-                LOGGER.exception('Error collecting samples')
+                self.__logger.exception('Error collecting samples')
 
     def get(self, resource_key : str) -> Tuple[float, Dict]:
         self._refresh_samples()
@@ -176,31 +175,33 @@
interface = match.group(1) return self.__timestamp, copy.deepcopy(self.__delta_samples.get(interface, {})) -def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples : queue.Queue) -> None: +def do_sampling( + samples_cache : SamplesCache, logger : logging.Logger, resource_key : str, out_samples : queue.Queue +) -> None: try: timestamp, samples = samples_cache.get(resource_key) counter_name = resource_key.split('/')[-1].split(':')[-1] value = samples.get(counter_name) if value is None: - LOGGER.warning('[do_sampling] value not found for {:s}'.format(resource_key)) + logger.warning('[do_sampling] value not found for {:s}'.format(resource_key)) return sample = (timestamp, resource_key, value) out_samples.put_nowait(sample) except: # pylint: disable=bare-except - LOGGER.exception('Error retrieving samples') + logger.exception('Error retrieving samples') def edit_config( - netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False, - target='running', default_operation='merge', test_option=None, error_option=None, + netconf_handler : NetconfSessionHandler, logger : logging.Logger, resources : List[Tuple[str, Any]], delete=False, + commit_per_rule=False, target='running', default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin ): str_method = 'DeleteConfig' if delete else 'SetConfig' - LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources))) + #logger.debug('[{:s}] resources = {:s}'.format(str_method, str(resources))) results = [None for _ in resources] for i,resource in enumerate(resources): str_resource_name = 'resources[#{:d}]'.format(i) try: - LOGGER.info('[{:s}] resource = {:s}'.format(str_method, str(resource))) + logger.debug('[{:s}] resource = {:s}'.format(str_method, str(resource))) chk_type(str_resource_name, resource, (list, tuple)) chk_length(str_resource_name, resource, min_length=2, max_length=2) resource_key,resource_value = resource @@ -208,7 +209,7 @@ def edit_config( str_config_message = compose_config( resource_key, resource_value, delete=delete, vendor=netconf_handler.vendor) if str_config_message is None: raise UnsupportedResourceKeyException(resource_key) - LOGGER.info('[{:s}] str_config_message[{:d}] = {:s}'.format( + logger.debug('[{:s}] str_config_message[{:d}] = {:s}'.format( str_method, len(str_config_message), str(str_config_message))) netconf_handler.edit_config( config=str_config_message, target=target, default_operation=default_operation, @@ -219,8 +220,17 @@ def edit_config( except Exception as e: # pylint: disable=broad-except str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting') msg = '[{:s}] Exception {:s} {:s}: {:s}' - LOGGER.exception(msg.format(str_method, str_operation, str_resource_name, str(resource))) + logger.exception(msg.format(str_method, str_operation, str_resource_name, str(resource))) results[i] = e # if validation fails, store the exception + + if not commit_per_rule: + try: + netconf_handler.commit() + except Exception as e: # pylint: disable=broad-except + msg = '[{:s}] Exception committing: {:s}' + str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting') + logger.exception(msg.format(str_method, str_operation, str(resources))) + results = [e for _ in resources] # if commit fails, set exception in each resource return results HISTOGRAM_BUCKETS = ( @@ -243,6 +253,7 @@ METRICS_POOL.get_or_create('UnsubscribeState', 
MetricTypeEnum.HISTOGRAM_DURATION class OpenConfigDriver(_Driver): def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called + self.__logger = logging.getLogger('{:s}:[{:s}:{:s}]'.format(str(__name__), str(address), str(port))) self.__lock = threading.Lock() #self.__initial = TreeNode('.') #self.__running = TreeNode('.') @@ -257,7 +268,7 @@ class OpenConfigDriver(_Driver): timezone=pytz.utc) self.__out_samples = queue.Queue() self.__netconf_handler : NetconfSessionHandler = NetconfSessionHandler(address, port, **settings) - self.__samples_cache = SamplesCache(self.__netconf_handler) + self.__samples_cache = SamplesCache(self.__netconf_handler, self.__logger) def Connect(self) -> bool: with self.__lock: @@ -295,13 +306,14 @@ class OpenConfigDriver(_Driver): try: chk_string(str_resource_name, resource_key, allow_empty=False) str_filter = get_filter(resource_key) - LOGGER.info('[GetConfig] str_filter = {:s}'.format(str(str_filter))) + #self.__logger.debug('[GetConfig] str_filter = {:s}'.format(str(str_filter))) if str_filter is None: str_filter = resource_key xml_data = self.__netconf_handler.get(filter=str_filter).data_ele if isinstance(xml_data, Exception): raise xml_data results.extend(parse(resource_key, xml_data)) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception retrieving {:s}: {:s}'.format(str_resource_name, str(resource_key))) + MSG = 'Exception retrieving {:s}: {:s}' + self.__logger.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) # if validation fails, store the exception return results @@ -312,17 +324,11 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - if self.__netconf_handler.commit_per_rule: - results = edit_config(self.__netconf_handler, resources, target='candidate', commit_per_rule= True) - else: - results = edit_config(self.__netconf_handler, resources, target='candidate') - try: - self.__netconf_handler.commit() - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources))) - results = [e for _ in resources] # if commit fails, set exception in each resource + results = edit_config( + self.__netconf_handler, self.__logger, resources, target='candidate', + commit_per_rule=self.__netconf_handler.commit_per_rule) else: - results = edit_config(self.__netconf_handler, resources) + results = edit_config(self.__netconf_handler, self.__logger, resources) return results @metered_subclass_method(METRICS_POOL) @@ -332,17 +338,11 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - if self.__netconf_handler.commit_per_rule: - results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule= True) - else: - results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True) - try: - self.__netconf_handler.commit() - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources))) - results = [e for _ in resources] # if commit fails, set exception in each resource + results = edit_config( + self.__netconf_handler, self.__logger, resources, target='candidate', delete=True, + commit_per_rule=self.__netconf_handler.commit_per_rule) 
else: - results = edit_config(self.__netconf_handler, resources, delete=True) + results = edit_config(self.__netconf_handler, self.__logger, resources, delete=True) return results @metered_subclass_method(METRICS_POOL) @@ -363,7 +363,8 @@ class OpenConfigDriver(_Driver): chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0) chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key))) + MSG = 'Exception validating {:s}: {:s}' + self.__logger.exception(MSG.format(str_subscription_name, str(resource_key))) results.append(e) # if validation fails, store the exception continue @@ -374,7 +375,7 @@ class OpenConfigDriver(_Driver): job_id = 'k={:s}/d={:f}/i={:f}'.format(resource_key, sampling_duration, sampling_interval) job = self.__scheduler.add_job( - do_sampling, args=(self.__samples_cache, resource_key, self.__out_samples), + do_sampling, args=(self.__samples_cache, self.__logger, resource_key, self.__out_samples), kwargs={}, id=job_id, trigger='interval', seconds=sampling_interval, start_date=start_date, end_date=end_date, timezone=pytz.utc) @@ -401,7 +402,8 @@ class OpenConfigDriver(_Driver): chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0) chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key))) + MSG = 'Exception validating {:s}: {:s}' + self.__logger.exception(MSG.format(str_subscription_name, str(resource_key))) results.append(e) # if validation fails, store the exception continue diff --git a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml index 4bc53ff1ddfbebbdcef2a0b4c37770210726676b..220f062b5da09d26ff6ec271491d6d40cfd46669 100644 --- a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml @@ -4,6 +4,7 @@ {% if operation is defined and operation != 'delete' %} <config> <name>{{name}}</name> + <type xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">ianaift:{{type}}</type> <description></description> <mtu>{{mtu}}</mtu> </config> diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml index 1bdb8efbff495f04ee90dadaffaa7412332531b7..e441004006e4cdd445f1d0244a9582b57956af40 100644 --- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml @@ -1,7 +1,8 @@ <interfaces xmlns="http://openconfig.net/yang/interfaces" xmlns:oc-ip="http://openconfig.net/yang/interfaces/ip" > - <interface> + <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> <name>{{name}}</name> + {% if operation is defined and operation != 'delete' %} <config> <name>{{name}}</name> <type xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">ianaift:{{type}}</type> @@ -44,5 +45,6 @@ {% endif %} </subinterface> </subinterfaces> + {% endif %} 
</interface> </interfaces> diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml index bf8c0c0770f9344fbed16f3a6b09f7fa99a978ef..855f321b4a69ba1e660487c108a05d0ec4b5d475 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml @@ -2,7 +2,7 @@ <network-instance> <name>{{name}}</name> <interfaces> - <interface> + <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> <id>{{id}}</id> <config> <id>{{id}}</id> diff --git a/src/device/tests/Device_OpenConfig_Template.py b/src/device/tests/Device_OpenConfig_Template.py index 8ab45337514bf354d8b338c8bb97721d099355f4..b9aae79a2b0e5a38a556e50dd2445592caca4daf 100644 --- a/src/device/tests/Device_OpenConfig_Template.py +++ b/src/device/tests/Device_OpenConfig_Template.py @@ -32,7 +32,7 @@ DEVICE_OC_CONNECT_RULES = json_device_connect_rules(DEVICE_OC_ADDRESS, DEVICE_OC 'hostkey_verify' : True, 'look_for_keys' : True, 'allow_agent' : True, - 'delete_rule' : False, + 'commit_per_rule': False, 'device_params' : {'name': 'default'}, 'manager_params' : {'timeout' : DEVICE_OC_TIMEOUT}, }) diff --git a/src/load_generator/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py index e94dc0cb948d703f71925fd932e749ebb544650e..5c56ea6ec603f4e9bb3fc72d5baa47f05ea0c991 100644 --- a/src/load_generator/load_gen/RequestGenerator.py +++ b/src/load_generator/load_gen/RequestGenerator.py @@ -34,6 +34,20 @@ from .Parameters import Parameters LOGGER = logging.getLogger(__name__) +ROUTER_ID = { + 'R149': '5.5.5.5', + 'R155': '5.5.5.1', + 'R199': '5.5.5.6', + +} + +VIRTUAL_CIRCUIT = { + 'R149': '5.5.5.5', + 'R155': '5.5.5.1', + 'R199': '5.5.5.6', + +} + class RequestGenerator: def __init__(self, parameters : Parameters) -> None: self._parameters = parameters @@ -242,34 +256,38 @@ class RequestGenerator: ] vlan_id = num_request % 1000 - circuit_id = '{:03d}'.format(vlan_id) + circuit_id = '{:03d}'.format(vlan_id + 100) src_device_name = self._device_data[src_device_uuid]['name'] - src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', ''))) + src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name'] + src_router_id = ROUTER_ID.get(src_device_name) + src_router_num = int(src_device_name.replace('R', '')) + if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num) dst_device_name = self._device_data[dst_device_uuid]['name'] - dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', ''))) + dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name'] + dst_router_num = int(dst_device_name.replace('R', '')) + dst_router_id = ROUTER_ID.get(dst_device_name) + if dst_router_id is None: dst_router_id = '10.0.0.{:d}'.format(dst_router_num) config_rules = [ json_config_rule_set('/settings', { 'mtu': 1512 }), json_config_rule_set( - '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { - 'router_id': src_router_id, - 'sub_interface_index': vlan_id, + '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_name, src_endpoint_name), { + 'sub_interface_index': 0, 'vlan_id': vlan_id, 'remote_router': dst_router_id, 'circuit_id': circuit_id, - }), + }), 
json_config_rule_set( - '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { - 'router_id': dst_router_id, - 'sub_interface_index': vlan_id, + '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_name, dst_endpoint_name), { + 'sub_interface_index': 0, 'vlan_id': vlan_id, 'remote_router': src_router_id, 'circuit_id': circuit_id, - }), + }), ] return json_service_l2nm_planned( request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) @@ -286,44 +304,56 @@ class RequestGenerator: json_constraint_sla_latency(e2e_latency_ms), ] - vlan_id = num_request % 1000 - bgp_as = 60000 + (num_request % 10000) - bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333) + bgp_as = 65000 + (num_request % 10000) + + vlan_id = num_request % 100 +100 + x = num_request % 255 + y = num_request % 25 * num_request % 10 route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id) src_device_name = self._device_data[src_device_uuid]['name'] src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name'] - src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', ''))) - src_address_ip = '.'.join([src_device_name.replace('R', ''), '0'] + src_endpoint_name.split('/')) + src_router_id = ROUTER_ID.get(src_device_name) + src_router_num = int(src_device_name.replace('R', '')) + if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num) + src_address_ip = '10.{:d}.{:d}.{:d}'.format(x, y, src_router_num) dst_device_name = self._device_data[dst_device_uuid]['name'] dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name'] - dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', ''))) - dst_address_ip = '.'.join([dst_device_name.replace('R', ''), '0'] + dst_endpoint_name.split('/')) + dst_router_num = int(dst_device_name.replace('R', '')) + dst_router_id = ROUTER_ID.get(dst_device_name) + if dst_router_id is None: dst_router_id = '10.0.0.{:d}'.format(dst_router_num) + dst_address_ip = '10.{:d}.{:d}.{:d}'.format(y, x, dst_router_num) + + policy_AZ = 'srv_{:d}_a'.format(vlan_id) + policy_ZA = 'srv_{:d}_b'.format(vlan_id) config_rules = [ json_config_rule_set('/settings', { - 'mtu' : 1512, 'bgp_as' : bgp_as, - 'bgp_route_target': bgp_route_target, + 'route_distinguisher': route_distinguisher, }), json_config_rule_set( - '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), { + '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_name, src_endpoint_name), { 'router_id' : src_router_id, 'route_distinguisher': route_distinguisher, - 'sub_interface_index': vlan_id, + 'sub_interface_index': 0, 'vlan_id' : vlan_id, 'address_ip' : src_address_ip, 'address_prefix' : 16, + 'policy_AZ' : policy_AZ, + 'policy_ZA' : policy_ZA, }), json_config_rule_set( - '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), { + '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_name, dst_endpoint_name), { 'router_id' : dst_router_id, 'route_distinguisher': route_distinguisher, - 'sub_interface_index': vlan_id, + 'sub_interface_index': 0, 'vlan_id' : vlan_id, 'address_ip' : dst_address_ip, 'address_prefix' : 16, + 'policy_ZA' : policy_AZ, + 'policy_AZ' : policy_ZA, }), ] return json_service_l3nm_planned( diff --git a/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py index 
c2ea6e213ee8d18b4507089fb2762c913e03039a..07e78d73631342d101d77697098e83961c7dcf26 100644 --- a/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py +++ b/src/service/service/service_handlers/l2nm_openconfig/ConfigRules.py @@ -20,58 +20,64 @@ def setup_config_rules( service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str, service_settings : TreeNode, endpoint_settings : TreeNode ) -> List[Dict]: + + if service_settings is None: return [] + if endpoint_settings is None: return [] + + json_settings : Dict = service_settings.value + json_endpoint_settings : Dict = endpoint_settings.value json_settings : Dict = {} if service_settings is None else service_settings.value json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value - mtu = json_settings.get('mtu', 1450 ) # 1512 + #mtu = json_settings.get('mtu', 1450 ) # 1512 #address_families = json_settings.get('address_families', [] ) # ['IPV4'] #bgp_as = json_settings.get('bgp_as', 0 ) # 65000 #bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 - router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 - remote_router = json_endpoint_settings.get('remote_router', '0.0.0.0') # '5.5.5.5' - circuit_id = json_endpoint_settings.get('circuit_id', '000' ) # '111' + remote_router = json_endpoint_settings.get('remote_router', '5.5.5.5') # '5.5.5.5' + circuit_id = json_endpoint_settings.get('circuit_id', '111' ) # '111' + if_cirid_name = '{:s}.{:s}'.format(endpoint_name, str(circuit_id)) network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id)) connection_point_id = 'VC-1' json_config_rules = [ - json_config_rule_set( - '/network_instance[default]', - {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}), - - json_config_rule_set( - '/network_instance[default]/protocols[OSPF]', - {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}), - - json_config_rule_set( - '/network_instance[default]/protocols[STATIC]', - {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}), - + json_config_rule_set( '/network_instance[{:s}]'.format(network_instance_name), - {'name': network_instance_name, 'type': 'L2VSI'}), + {'name': network_instance_name, + 'type': 'L2VSI'}), json_config_rule_set( - '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), - {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}), + '/interface[{:s}]/subinterface[0]'.format(if_cirid_name), + {'name': if_cirid_name, + 'type': 'l2vlan', + 'index': sub_interface_index, + 'vlan_id': vlan_id}), json_config_rule_set( '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, - 'subinterface': sub_interface_index}), + {'name': network_instance_name, + 'id': if_cirid_name, + 'interface': if_cirid_name, + 'subinterface': 0 + }), json_config_rule_set( 
'/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), - {'name': network_instance_name, 'connection_point': connection_point_id, 'VC_ID': circuit_id, - 'remote_system': remote_router}), + {'name': network_instance_name, + 'connection_point': connection_point_id, + 'VC_ID': circuit_id, + 'remote_system': remote_router + }), ] return json_config_rules @@ -88,9 +94,9 @@ def teardown_config_rules( #bgp_as = json_settings.get('bgp_as', 0 ) # 65000 #bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 - router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' - sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + #sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 #vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 @@ -99,36 +105,17 @@ def teardown_config_rules( if_cirid_name = '{:s}.{:s}'.format(endpoint_name, str(circuit_id)) network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id)) - connection_point_id = 'VC-1' + #connection_point_id = 'VC-1' json_config_rules = [ - json_config_rule_delete( - '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), - {'name': network_instance_name, 'connection_point': connection_point_id}), - - json_config_rule_delete( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, - 'subinterface': sub_interface_index}), - - json_config_rule_delete( - '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), - {'name': if_cirid_name, 'index': sub_interface_index}), - json_config_rule_delete( '/network_instance[{:s}]'.format(network_instance_name), {'name': network_instance_name}), - - json_config_rule_delete( - '/network_instance[default]/protocols[STATIC]', - {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}), - - json_config_rule_delete( - '/network_instance[default]/protocols[OSPF]', - {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}), - + json_config_rule_delete( - '/network_instance[default]', - {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}), + '/interface[{:s}]/subinterface[0]'.format(if_cirid_name),{ + 'name': if_cirid_name, + }), + ] return json_config_rules diff --git a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py index 6eb139dbd03b4dff781a08548c03627512501ab5..d511c8947ecb43052fd154ab3ce3293a468b4263 100644 --- a/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py +++ b/src/service/service/service_handlers/l2nm_openconfig/L2NMOpenConfigServiceHandler.py @@ -75,10 +75,12 @@ class L2NMOpenConfigServiceHandler(_ServiceHandler): service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, settings, endpoint_settings) - del device_obj.device_config.config_rules[:] - for json_config_rule in json_config_rules: - 
device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) - self.__task_executor.configure_device(device_obj) + if len(json_config_rules) > 0: + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) @@ -110,10 +112,12 @@ class L2NMOpenConfigServiceHandler(_ServiceHandler): service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, settings, endpoint_settings) - del device_obj.device_config.config_rules[:] - for json_config_rule in json_config_rules: - device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) - self.__task_executor.configure_device(device_obj) + if len(json_config_rules) > 0: + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint))) diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py index 903ad8cd5ae442a03d54fb49083f3837a3c8187c..ef93dcdda8145cab15ff21c24b6318e9eb00e098 100644 --- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py @@ -21,120 +21,162 @@ def setup_config_rules( service_settings : TreeNode, endpoint_settings : TreeNode ) -> List[Dict]: - json_settings : Dict = {} if service_settings is None else service_settings.value - json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value + if service_settings is None: return [] + if endpoint_settings is None: return [] + + json_settings : Dict = service_settings.value + json_endpoint_settings : Dict = endpoint_settings.value service_short_uuid = service_uuid.split('-')[-1] network_instance_name = '{:s}-NetInst'.format(service_short_uuid) network_interface_desc = '{:s}-NetIf'.format(service_uuid) network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) - mtu = json_settings.get('mtu', 1450 ) # 1512 - #address_families = json_settings.get('address_families', [] ) # ['IPV4'] - bgp_as = json_settings.get('bgp_as', 0 ) # 65000 - bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 - - #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' - route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + bgp_as = json_settings.get('bgp_as', 65000 ) # 65000 + route_distinguisher = json_settings.get('route_distinguisher', '0:0' ) # '60001:801' sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + policy_import = 
json_endpoint_settings.get('policy_AZ', '2' ) # 2 + policy_export = json_endpoint_settings.get('policy_ZA', '7' ) # 30 + if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id) json_config_rules = [ + # Configure Interface (not used) + #json_config_rule_set( + # '/interface[{:s}]'.format(endpoint_name), { + # 'name': endpoint_name, 'description': network_interface_desc, 'mtu': mtu, + #}), + + #Create network instance json_config_rule_set( '/network_instance[{:s}]'.format(network_instance_name), { - 'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF', + 'name': network_instance_name, + 'description': network_interface_desc, + 'type': 'L3VRF', 'route_distinguisher': route_distinguisher, - #'router_id': router_id, 'address_families': address_families, - }), - json_config_rule_set( - '/interface[{:s}]'.format(endpoint_name), { - 'name': endpoint_name, 'description': network_interface_desc, 'mtu': mtu, - }), - json_config_rule_set( - '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), { - 'name': endpoint_name, 'index': sub_interface_index, - 'description': network_subinterface_desc, 'vlan_id': vlan_id, - 'address_ip': address_ip, 'address_prefix': address_prefix, - }), - json_config_rule_set( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { - 'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_name, - 'subinterface': sub_interface_index, + #'router_id': router_id, + #'address_families': address_families, }), + + #Add BGP protocol to network instance json_config_rule_set( '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { - 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as, + 'name': network_instance_name, + 'protocol_name': 'BGP', + 'identifier': 'BGP', + 'as': bgp_as, + 'router_id': router_id, }), + + #Add DIRECTLY CONNECTED protocol to network instance json_config_rule_set( - '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', + '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(network_instance_name), { + 'name': network_instance_name, + 'identifier': 'DIRECTLY_CONNECTED', + 'protocol_name': 'DIRECTLY_CONNECTED', }), + + + #Add STATIC protocol to network instance json_config_rule_set( - '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( - network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', + '/network_instance[{:s}]/protocols[STATIC]'.format(network_instance_name), { + 'name': network_instance_name, + 'identifier': 'STATIC', + 'protocol_name': 'STATIC', }), + + #Create interface with subinterface json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + '/interface[{:s}]/subinterface[{:d}]'.format(if_subif_name, sub_interface_index), { + 'name' : if_subif_name, + 'type' :'l3ipvlan', + 'mtu' : mtu, + 'index' : sub_interface_index, + 'description' : network_subinterface_desc, + 'vlan_id' : vlan_id, + 'address_ip' : address_ip, + 'address_prefix': address_prefix, }), + + #Associate interface to network 
instance json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { + 'name' : network_instance_name, + 'id' : if_subif_name, + 'interface' : if_subif_name, + 'subinterface': sub_interface_index, + }), + + #Create routing policy json_config_rule_set( - '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { - 'policy_name': '{:s}_import'.format(network_instance_name), + '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import,route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_import), + 'ext_community_member' : route_distinguisher, }), json_config_rule_set( - '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', + # pylint: disable=duplicate-string-formatting-argument + '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(policy_import, policy_import), { + 'policy_name' : policy_import, + 'statement_name' : 'stm_{:s}'.format(policy_import), + 'ext_community_set_name': 'set_{:s}'.format(policy_import), + 'policy_result' : 'ACCEPT_ROUTE', }), + + #Associate routing policy to network instance json_config_rule_set( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name), + '/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_import), { + 'name' : network_instance_name, + 'import_policy': policy_import, }), + + #Create routing policy json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_export), + 'ext_community_member' : route_distinguisher, }), json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + # pylint: disable=duplicate-string-formatting-argument + '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(policy_export, policy_export), { + 'policy_name' : policy_export, + 'statement_name' : 'stm_{:s}'.format(policy_export), + 'ext_community_set_name': 'set_{:s}'.format(policy_export), + 'policy_result' : 'ACCEPT_ROUTE', }), + + #Associate routing policy to network instance json_config_rule_set( - '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { - 'policy_name': '{:s}_export'.format(network_instance_name), + 
'/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_export),{ + 'name' : network_instance_name, + 'export_policy': policy_export, }), + + #Create table connections json_config_rule_set( - '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', + '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(network_instance_name), { + 'name' : network_instance_name, + 'src_protocol' : 'DIRECTLY_CONNECTED', + 'dst_protocol' : 'BGP', + 'address_family' : 'IPV4', + 'default_import_policy': 'ACCEPT_ROUTE', }), + json_config_rule_set( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name), + '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { + 'name' : network_instance_name, + 'src_protocol' : 'STATIC', + 'dst_protocol' : 'BGP', + 'address_family' : 'IPV4', + 'default_import_policy': 'ACCEPT_ROUTE', }), - ] + ] return json_config_rules def teardown_config_rules( @@ -142,108 +184,86 @@ def teardown_config_rules( service_settings : TreeNode, endpoint_settings : TreeNode ) -> List[Dict]: + if service_settings is None: return [] + if endpoint_settings is None: return [] + json_settings : Dict = {} if service_settings is None else service_settings.value json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value - #mtu = json_settings.get('mtu', 1450 ) # 1512 - #address_families = json_settings.get('address_families', [] ) # ['IPV4'] - #bgp_as = json_settings.get('bgp_as', 0 ) # 65000 - bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 + service_short_uuid = service_uuid.split('-')[-1] + network_instance_name = '{:s}-NetInst'.format(service_short_uuid) + #network_interface_desc = '{:s}-NetIf'.format(service_uuid) + #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) + #mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + #bgp_as = json_settings.get('bgp_as', 65000 ) # 65000 + route_distinguisher = json_settings.get('route_distinguisher', '0:0' ) # '60001:801' + #sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' - #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' - sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + policy_import = json_endpoint_settings.get('policy_AZ', '2' ) # 2 + policy_export = json_endpoint_settings.get('policy_ZA', '7' ) # 30 if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id) - service_short_uuid = service_uuid.split('-')[-1] - network_instance_name = '{:s}-NetInst'.format(service_short_uuid) - #network_interface_desc = '{:s}-NetIf'.format(service_uuid) - 
#network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) json_config_rules = [ + #Delete table connections json_config_rule_delete( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { - 'name': network_instance_name, 'id': if_subif_name, - }), - json_config_rule_delete( - '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_name, sub_interface_index), { - 'name': endpoint_name, 'index': sub_interface_index, - }), - json_config_rule_delete( - '/interface[{:s}]'.format(endpoint_name), { - 'name': endpoint_name, - }), - json_config_rule_delete( - '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( - network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', + '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(network_instance_name),{ + 'name' : network_instance_name, + 'src_protocol' : 'DIRECTLY_CONNECTED', + 'dst_protocol' : 'BGP', + 'address_family': 'IPV4', }), + + json_config_rule_delete( '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', + 'name' : network_instance_name, + 'src_protocol' : 'STATIC', + 'dst_protocol' : 'BGP', 'address_family': 'IPV4', }), + + #Delete export routing policy + json_config_rule_delete( - '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { - 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', - }), - json_config_rule_delete( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, + '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { + 'policy_name': '{:s}_export'.format(network_instance_name), }), json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', + '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_export), }), + + #Delete import routing policy + json_config_rule_delete( '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { 'policy_name': '{:s}_import'.format(network_instance_name), }), json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - }), - json_config_rule_delete( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, - }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_export'.format(network_instance_name), 
'statement_name': '3', + '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import, route_distinguisher), { + 'ext_community_set_name': 'set_{:s}'.format(policy_import), }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { - 'policy_name': '{:s}_export'.format(network_instance_name), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + + #Delete interface; automatically deletes: + # - /interface[]/subinterface[] + json_config_rule_delete('/interface[{:s}]/subinterface[0]'.format(if_subif_name), + { + 'name': if_subif_name, }), - json_config_rule_delete( - '/network_instance[{:s}]'.format(network_instance_name), { - 'name': network_instance_name + + #Delete network instance; automatically deletes: + # - /network_instance[]/interface[] + # - /network_instance[]/protocols[] + # - /network_instance[]/inter_instance_policies[] + json_config_rule_delete('/network_instance[{:s}]'.format(network_instance_name), + { + 'name': network_instance_name }), ] return json_config_rules diff --git a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py index e3af6302dc996bb1582f2c339b3296800aa9d655..b2639ddad58e4c453f1b1e2dc87fce8861ad79a2 100644 --- a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py @@ -75,10 +75,12 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, settings, endpoint_settings) - del device_obj.device_config.config_rules[:] - for json_config_rule in json_config_rules: - device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) - self.__task_executor.configure_device(device_obj) + if len(json_config_rules) > 0: + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) @@ -110,10 +112,12 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler): service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, settings, endpoint_settings) - del device_obj.device_config.config_rules[:] - for json_config_rule in json_config_rules: - device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) - self.__task_executor.configure_device(device_obj) + if len(json_config_rules) > 0: + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to 
diff --git a/src/tests/ofc22/descriptors_emulated_xr.json b/src/tests/ofc22/descriptors_emulated_xr.json
index 4e247bb30d4df25fa75d30a3baa94f1348c0a6d9..b873d31143406a5f6cedbf19c1b357f2223d42d9 100644
--- a/src/tests/ofc22/descriptors_emulated_xr.json
+++ b/src/tests/ofc22/descriptors_emulated_xr.json
@@ -96,6 +96,18 @@
             "device_operational_status": 1,
             "device_drivers": [6],
             "device_endpoints": []
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "X2-XR-CONSTELLATION"}},
+            "device_type": "xr-constellation",
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.19.219.44"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "443"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"xr-user-1\", \"password\": \"xr-user-1\", \"hub_module_name\": \"XR HUB 2\", \"consistency-mode\": \"lifecycle\"}"}}
+            ]},
+            "device_operational_status": 1,
+            "device_drivers": [6],
+            "device_endpoints": []
         }
     ],
     "links": [
diff --git a/src/tests/scenario2/deploy_all.sh b/src/tests/scenario2/deploy_all.sh
index 19ea0e6db16af66c716927074112da6f18e83b01..541612db431fd73e58fd7c1699df97342c11ea70 100755
--- a/src/tests/scenario2/deploy_all.sh
+++ b/src/tests/scenario2/deploy_all.sh
@@ -37,20 +37,20 @@ kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml
 
 # Deploy TFS for Domain 1
 source nfvsdn22/deploy_specs_dom1.sh
-./deploy.sh
+./deploy/all.sh
 mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom1.sh
 
 # Deploy TFS for Domain 2
 source nfvsdn22/deploy_specs_dom2.sh
-./deploy.sh
+./deploy/all.sh
 mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom2.sh
 
 # Deploy TFS for Domain 3
 source nfvsdn22/deploy_specs_dom3.sh
-./deploy.sh
+./deploy/all.sh
 mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom3.sh
 
 # Deploy TFS for Domain 4
 source nfvsdn22/deploy_specs_dom4.sh
-./deploy.sh
+./deploy/all.sh
 mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom4.sh
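As a quick sanity check on the new X2-XR-CONSTELLATION entry, the hub_module_name that the WebUI later reads is stored as a JSON-encoded string inside the _connect/settings config rule. The sketch below is illustrative only and assumes the top-level "devices" list used by TFS descriptor files.

# Hedged sketch (not part of the patch): read hub_module_name back from the
# _connect/settings rule of the device added above.
import json

with open('src/tests/ofc22/descriptors_emulated_xr.json') as f:
    descriptors = json.load(f)

for device in descriptors['devices']:
    if device['device_id']['device_uuid']['uuid'] != 'X2-XR-CONSTELLATION':
        continue
    for rule in device['device_config']['config_rules']:
        custom = rule.get('custom', {})
        if custom.get('resource_key') == '_connect/settings':
            settings = json.loads(custom['resource_value'])  # the value is itself a JSON string
            print(settings['hub_module_name'])               # expected: 'XR HUB 2'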
diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py
index defbe2cb003cc97830d6ec24db01bf8734a7f530..70a5b5bad41df6520cb2facdad94cfee04f726cd 100644
--- a/src/webui/service/service/routes.py
+++ b/src/webui/service/service/routes.py
@@ -12,27 +12,56 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from contextlib import contextmanager
+import json
 import grpc
-from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for
+from collections import defaultdict
+from flask import current_app, redirect, render_template, Blueprint, flash, session, url_for, request
 from common.proto.context_pb2 import (
-    IsolationLevelEnum, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection)
+    IsolationLevelEnum, Service, ServiceId, ServiceTypeEnum, ServiceStatusEnum, Connection, Empty, DeviceDriverEnum, ConfigActionEnum, Device, DeviceList)
 from common.tools.context_queries.Context import get_context
+from common.tools.context_queries.Topology import get_topology
 from common.tools.context_queries.EndPoint import get_endpoint_names
 from common.tools.context_queries.Service import get_service
 from context.client.ContextClient import ContextClient
 from service.client.ServiceClient import ServiceClient
+from typing import Optional, Set
+from common.tools.object_factory.Topology import json_topology_id
+from common.tools.object_factory.ConfigRule import json_config_rule_set
+from common.tools.object_factory.Context import json_context_id
 
 service = Blueprint('service', __name__, url_prefix='/service')
 
 context_client = ContextClient()
 service_client = ServiceClient()
 
 
+@contextmanager
+def connected_client(c):
+    try:
+        c.connect()
+        yield c
+    finally:
+        c.close()
+
+# Context client must be in connected state when calling this function
+def get_device_drivers_in_use(topology_uuid: str, context_uuid: str) -> Set[str]:
+    active_drivers = set()
+    grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False)
+    topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
+    grpc_devices: DeviceList = context_client.ListDevices(Empty())
+    for device in grpc_devices.devices:
+        if device.device_id.device_uuid.uuid in topo_device_uuids:
+            for driver in device.device_drivers:
+                active_drivers.add(DeviceDriverEnum.Name(driver))
+    return active_drivers
+
 @service.get('/')
 def home():
     if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
     context_uuid = session['context_uuid']
+    topology_uuid = session['topology_uuid']
 
     context_client.connect()
@@ -44,10 +73,12 @@ def home():
     try:
         services = context_client.ListServices(context_obj.context_id)
         services = services.services
+        active_drivers = get_device_drivers_in_use(topology_uuid, context_uuid)
     except grpc.RpcError as e:
         if e.code() != grpc.StatusCode.NOT_FOUND: raise
         if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
         services, device_names, endpoints_data = list(), dict(), dict()
+        active_drivers = set()
     else:
         endpoint_ids = list()
         for service_ in services:
@@ -57,7 +88,7 @@ def home():
     context_client.close()
     return render_template(
         'service/home.html', services=services, device_names=device_names, endpoints_data=endpoints_data,
-        ste=ServiceTypeEnum, sse=ServiceStatusEnum)
+        ste=ServiceTypeEnum, sse=ServiceStatusEnum, active_drivers=active_drivers)
 
 
 @service.route('add', methods=['GET', 'POST'])
@@ -67,6 +98,151 @@ def add():
     #return render_template('service/home.html')
 
 
+
+def get_hub_module_name(dev: Device) -> Optional[str]:
+    for cr in dev.device_config.config_rules:
+        if cr.action == ConfigActionEnum.CONFIGACTION_SET and cr.custom and cr.custom.resource_key == "_connect/settings":
+            try:
+                cr_dict = json.loads(cr.custom.resource_value)
+                if "hub_module_name" in cr_dict:
+                    return cr_dict["hub_module_name"]
+            except json.JSONDecodeError:
+                pass
+    return None
+
+@service.route('add-xr', methods=['GET', 'POST'])
+def add_xr():
+    ### FIXME: copypaste
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
+        flash("Please select a context!", "warning")
+        return redirect(url_for("main.home"))
+
+    context_uuid = session['context_uuid']
+    topology_uuid = session['topology_uuid']
+
+    context_client.connect()
+    grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False)
+    if grpc_topology is None:
+        flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger')
+        return redirect(url_for("main.home"))
+    else:
+        topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
+        grpc_devices= context_client.ListDevices(Empty())
+        devices = [
+            device for device in grpc_devices.devices
+            if device.device_id.device_uuid.uuid in topo_device_uuids and DeviceDriverEnum.DEVICEDRIVER_XR in device.device_drivers
+        ]
+        devices.sort(key=lambda dev: dev.name)
+
+        hub_interfaces_by_device = defaultdict(list)
+        leaf_interfaces_by_device = defaultdict(list)
+        constellation_name_to_uuid = {}
+        dev_ep_to_uuid = {}
+        ep_uuid_to_name = {}
+        for d in devices:
+            constellation_name_to_uuid[d.name] = d.device_id.device_uuid.uuid
+            hm_name = get_hub_module_name(d)
+            if hm_name is not None:
+                hm_if_prefix= hm_name + "|"
+                for ep in d.device_endpoints:
+                    dev_ep_to_uuid[(d.name, ep.name)] = ep.endpoint_id.endpoint_uuid.uuid
+                    if ep.name.startswith(hm_if_prefix):
+                        hub_interfaces_by_device[d.name].append(ep.name)
+                    else:
+                        leaf_interfaces_by_device[d.name].append(ep.name)
+                    ep_uuid_to_name[ep.endpoint_id.endpoint_uuid.uuid] = (d.name, ep.name)
+                hub_interfaces_by_device[d.name].sort()
+                leaf_interfaces_by_device[d.name].sort()
+
+        # Find out what endpoints are already used so that they can be disabled
+        # in the create screen
+        context_obj = get_context(context_client, context_uuid, rw_copy=False)
+        if context_obj is None:
+            flash('Context({:s}) not found'.format(str(context_uuid)), 'danger')
+            return redirect(request.url)
+
+        services = context_client.ListServices(context_obj.context_id)
+        ep_used_by={}
+        for service in services.services:
+            if service.service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
+                for ep in service.service_endpoint_ids:
+                    ep_uuid = ep.endpoint_uuid.uuid
+                    if ep_uuid in ep_uuid_to_name:
+                        dev_name, ep_name = ep_uuid_to_name[ep_uuid]
+                        ep_used_by[f"{ep_name}@{dev_name}"] = service.name
+
+    context_client.close()
+
+    if request.method != 'POST':
+        return render_template('service/add-xr.html', devices=devices, hub_if=hub_interfaces_by_device, leaf_if=leaf_interfaces_by_device, ep_used_by=ep_used_by)
+    else:
+        service_name = request.form["service_name"]
+        if service_name == "":
+            flash(f"Service name must be specified", 'danger')
+
+        constellation = request.form["constellation"]
+        constellation_uuid = constellation_name_to_uuid.get(constellation, None)
+        if constellation_uuid is None:
+            flash(f"Invalid constellation \"{constellation}\"", 'danger')
+
+        hub_if = request.form["hubif"]
+        hub_if_uuid = dev_ep_to_uuid.get((constellation, hub_if), None)
+        if hub_if_uuid is None:
+            flash(f"Invalid hub interface \"{hub_if}\"", 'danger')
+
+        leaf_if = request.form["leafif"]
+        leaf_if_uuid = dev_ep_to_uuid.get((constellation, leaf_if), None)
+        if leaf_if_uuid is None:
+            flash(f"Invalid leaf interface \"{leaf_if}\"", 'danger')
+
+        if service_name == "" or constellation_uuid is None or hub_if_uuid is None or leaf_if_uuid is None:
+            return redirect(request.url)
+
+
+        json_context_uuid=json_context_id(context_uuid)
+        sr = {
+            "name": service_name,
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": context_uuid}},
+                "service_uuid": {"uuid": service_name}
+            },
+            'service_type'        : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
+            "service_endpoint_ids": [
+                {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': hub_if_uuid}, 'topology_id': json_topology_id("admin", context_id=json_context_uuid)},
+                {'device_id': {'device_uuid': {'uuid': constellation_uuid}}, 'endpoint_uuid': {'uuid': leaf_if_uuid}, 'topology_id': json_topology_id("admin", context_id=json_context_uuid)}
+            ],
+            'service_status'      : {'service_status': ServiceStatusEnum.SERVICESTATUS_PLANNED},
+            'service_constraints' : [],
+        }
+
+        json_tapi_settings = {
+            'capacity_value'  : 50.0,
+            'capacity_unit'   : 'GHz',
+            'layer_proto_name': 'PHOTONIC_MEDIA',
+            'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC',
+            'direction'       : 'UNIDIRECTIONAL',
+        }
+        config_rule = json_config_rule_set('/settings', json_tapi_settings)
+
+        with connected_client(service_client) as sc:
+            endpoints, sr['service_endpoint_ids'] = sr['service_endpoint_ids'], []
+            try:
+                create_response = sc.CreateService(Service(**sr))
+            except Exception as e:
+                flash(f'Failure to create service {service_name}, exception {str(e)}', 'danger')
+                return redirect(request.url)
+
+            sr['service_endpoint_ids'] = endpoints
+            sr['service_config'] = {'config_rules': [config_rule]}
+
+            try:
+                update_response = sc.UpdateService(Service(**sr))
+                flash(f'Created service {update_response.service_uuid.uuid}', 'success')
+            except Exception as e:
+                flash(f'Failure to update service {create_response.service_uuid.uuid} with endpoints and configuration, exception {str(e)}', 'danger')
+                return redirect(request.url)
+
+    return redirect(url_for('service.home'))
+
 @service.get('<path:service_uuid>/detail')
 def detail(service_uuid: str):
     if 'context_uuid' not in session or 'topology_uuid' not in session:
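The comment on get_device_drivers_in_use() requires the context client to be connected before the call; outside home(), the connected_client() helper added in the same hunk can provide that guarantee. A hedged usage sketch follows; the import path and the 'admin' context/topology UUIDs are assumptions for illustration only.

# Usage sketch (not part of the patch); reuses the module-level context_client
# defined in routes.py. UUIDs are placeholders.
from webui.service.service.routes import connected_client, context_client, get_device_drivers_in_use

with connected_client(context_client):
    active_drivers = get_device_drivers_in_use('admin', 'admin')   # (topology_uuid, context_uuid)

# home() passes this set to the template, which shows the "Add New XR Service"
# button only when 'DEVICEDRIVER_XR' is present.
print('DEVICEDRIVER_XR' in active_drivers)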
diff --git a/src/webui/service/templates/service/add-xr.html b/src/webui/service/templates/service/add-xr.html
new file mode 100644
index 0000000000000000000000000000000000000000..36fe132caa7df1e3c72fa09ff2c39e2a92a7a357
--- /dev/null
+++ b/src/webui/service/templates/service/add-xr.html
@@ -0,0 +1,105 @@
+<!--
+ Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <script>
+        var js_hub_if = JSON.parse('{{hub_if | tojson | safe}}');
+        var js_leaf_if = JSON.parse('{{leaf_if | tojson | safe}}');
+        var js_ep_used_by = JSON.parse('{{ep_used_by | tojson | safe}}');
+
+        function clear_select_except_first(s) {
+            while (s.options.length > 1) {
+                s.remove(1);
+            }
+        }
+
+        function add_ep_to_select(sel, dev_name, ep_name) {
+            used_by = js_ep_used_by[ep_name + "@" + dev_name];
+            var o;
+            if (used_by === undefined) {
+                o = new Option(ep_name, ep_name)
+            } else {
+                o = new Option(ep_name + " (used by " + used_by + ")", ep_name)
+                o.disabled=true
+            }
+            sel.add(o);
+        }
+
+        function constellationSelected() {
+            const constellation_select = document.getElementById('constellation');
+            const hubif_select = document.getElementById('hubif');
+            const leafif_select = document.getElementById('leafif');
+
+            clear_select_except_first(hubif_select)
+            clear_select_except_first(leafif_select)
+            if (constellation_select.value) {
+                const hub_ifs=js_hub_if[constellation_select.value]
+                for (const hi of hub_ifs) {
+                    add_ep_to_select(hubif_select, constellation_select.value, hi);
+                }
+
+                const leaf_ifs=js_leaf_if[constellation_select.value]
+                for (const li of leaf_ifs) {
+                    add_ep_to_select(leafif_select, constellation_select.value, li);
+                }
+            }
+        }
+    </script>
+
+    <h1>Add XR Service</h1>
+    <form action="#" method="post">
+        <fieldset class="form-group row mb-3">
+            <label for="service_name" class="col-sm-3 col-form-label">Service name:</label>
+            <div class="col-sm-9">
+                <input type="text" id="service_name" name="service_name" class="form-control">
+            </div>
+        </fieldset>
+
+        <fieldset class="form-group row mb-3">
+            <label for="constellation" class="col-sm-3 col-form-label">Constellation:</label>
+            <div class="col-sm-9">
+                <select name="constellation" id="constellation" onchange="constellationSelected()" class="form-select">
+                    <option value="">(choose constellation)</option>
+                    {% for dev in devices %}
+                    <option value="{{dev.name}}">{{dev.name}}</option>
+                    {% endfor %}
+                </select>
+            </div>
+        </fieldset>
+
+        <fieldset class="form-group row mb-3">
+            <label for="hubif" class="col-sm-3 col-form-label">Hub Endpoint:</label>
+            <div class="col-sm-9">
+                <select name="hubif" id="hubif" class="col-sm-8 form-select">
+                    <option value="">(choose hub endpoint)</option>
+                </select>
+            </div>
+        </fieldset>
+
+        <fieldset class="form-group row mb-3">
+            <label for="leafif" class="col-sm-3 col-form-label">Leaf Endpoint:</label>
+            <div class="col-sm-9">
+                <select name="leafif" id="leafif" class="col-sm-8 form-select">
+                    <option value="">(choose leaf endpoint)</option>
+                </select>
+            </div>
+        </fieldset>
+
+        <input type="submit" class="btn btn-primary" value="Create">
+    </form>
+{% endblock %}
diff --git a/src/webui/service/templates/service/home.html b/src/webui/service/templates/service/home.html
index 79b55c962dcdd0af4a380928c180f6c9def75ba7..00feaff59128dd026ab2bdb369229a9d0aaae805 100644
--- a/src/webui/service/templates/service/home.html
+++ b/src/webui/service/templates/service/home.html
@@ -26,6 +26,18 @@
             Add New Service
         </a>
     </div> -->
+
+    <!-- Only display the XR service addition button if there are XR constellations. Otherwise it might confuse
+         the user, as other service types do not yet have a GUI for adding services. -->
+    {% if "DEVICEDRIVER_XR" in active_drivers %}
+    <div class="col">
+        <a href="{{ url_for('service.add_xr') }}" class="btn btn-primary" style="margin-bottom: 10px;">
+            <i class="bi bi-plus"></i>
+            Add New XR Service
+        </a>
+    </div>
+    {% endif %}
+
     <div class="col">
         {{ services | length }} services found in context <i>{{ session['context_uuid'] }}</i>
     </div>
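Finally, the gate added to home.html compares string enum names against active_drivers, while the descriptors store numeric driver ids ("device_drivers": [6] for the XR constellation). A small sketch of the mapping the patch relies on; the exact enum numbering comes from the TFS context proto and is assumed here, not defined by this patch.

# Hedged sketch (not part of the patch): numeric driver id 6 from
# descriptors_emulated_xr.json is expected to map to 'DEVICEDRIVER_XR',
# the string checked by home.html.
from common.proto.context_pb2 import DeviceDriverEnum

driver_name = DeviceDriverEnum.Name(6)                     # expected: 'DEVICEDRIVER_XR'
active_drivers = {driver_name}
show_add_xr_button = 'DEVICEDRIVER_XR' in active_drivers   # mirrors the Jinja condition
print(show_add_xr_button)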