# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import anytree, logging, pytz, queue, threading

#import lxml.etree as ET
from datetime import datetime, timedelta
from typing import Any, Iterator, List, Optional, Tuple, Union
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.job import Job
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
from common.type_checkers.Checkers import chk_length, chk_string, chk_type, chk_float
from device.service.driver_api._Driver import _Driver
from device.service.driver_api.AnyTreeTools import TreeNode, get_subnode, set_subnode_value
from .templates import ALL_RESOURCE_KEYS, get_filter, parse
from .NetconfSessionHandler import NetconfSessionHandler, edit_config
from .SamplesCache import SamplesCache, do_sampling #dump_subtree
#from .TelemetryProtocolEnum import TelemetryProtocolEnum, parse_telemetry_protocol

#from .Tools import xml_pretty_print, xml_to_dict, xml_to_file
DEBUG_MODE = False
logging.getLogger('ncclient.manager').setLevel(logging.DEBUG if DEBUG_MODE else logging.WARNING)
logging.getLogger('ncclient.transport.ssh').setLevel(logging.DEBUG if DEBUG_MODE else logging.WARNING)
logging.getLogger('apscheduler.executors.default').setLevel(logging.INFO if DEBUG_MODE else logging.ERROR)
logging.getLogger('apscheduler.scheduler').setLevel(logging.INFO if DEBUG_MODE else logging.ERROR)
logging.getLogger('monitoring-client').setLevel(logging.INFO if DEBUG_MODE else logging.ERROR)
LOGGER = logging.getLogger(__name__)
HISTOGRAM_BUCKETS = (
# .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
0.0001, 0.00025, 0.00050, 0.00075,
0.0010, 0.0025, 0.0050, 0.0075,
0.0100, 0.0250, 0.0500, 0.0750,
0.1000, 0.2500, 0.5000, 0.7500,
1.0000, 2.5000, 5.0000, 7.5000,
10.0, 25.0, 50.0, 75.0,
100.0, INF
)
METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': 'openconfig'})
METRICS_POOL.get_or_create('GetInitialConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
METRICS_POOL.get_or_create('GetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
METRICS_POOL.get_or_create('SetConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
METRICS_POOL.get_or_create('DeleteConfig', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
METRICS_POOL.get_or_create('SubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
METRICS_POOL.get_or_create('UnsubscribeState', MetricTypeEnum.HISTOGRAM_DURATION, buckets=HISTOGRAM_BUCKETS)
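# Each get_or_create() call above pre-registers a duration histogram for one public driver method,
# labelled with driver='openconfig'. A hypothetical sketch of how a method could be wrapped to feed
# these histograms (the exact wiring is defined in common.method_wrappers.Decorator and may differ):
#
#   @metered_subclass_method(METRICS_POOL)
#   def GetConfig(self, resource_keys=[]): ...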
class OpenConfigDriver(_Driver):
def __init__(self, address : str, port : int, **settings) -> None: # pylint: disable=super-init-not-called
self.__lock = threading.Lock()
#self.__initial = TreeNode('.')
#self.__running = TreeNode('.')
self.__subscriptions = TreeNode('.')
self.__started = threading.Event()
self.__terminate = threading.Event()
#self.__telemetry_protocol = parse_telemetry_protocol(settings.get('telemetry_protocol'))
#if self.__telemetry_protocol == TelemetryProtocolEnum.GNMI:
# self.__telemetry = GnmiTelemetry()
#else:
# self.__telemetry = NetConfTelemetry()
self.__scheduler = BackgroundScheduler(daemon=True) # scheduler used to emulate sampling events
self.__scheduler.configure(
jobstores = {'default': MemoryJobStore()},
executors = {'default': ThreadPoolExecutor(max_workers=1)},
job_defaults = {'coalesce': False, 'max_instances': 3},
timezone=pytz.utc)
self.__out_samples = queue.Queue()

self.__netconf_handler : NetconfSessionHandler = NetconfSessionHandler(address, port, **settings)
self.__samples_cache = SamplesCache(self.__netconf_handler)
def Connect(self) -> bool:
with self.__lock:
if self.__started.is_set(): return True

self.__netconf_handler.connect()
# Connect triggers activation of sampling events that will be scheduled based on subscriptions
self.__scheduler.start()
self.__started.set()
return True
def Disconnect(self) -> bool:
with self.__lock:
# Trigger termination of loops and processes
self.__terminate.set()
# If not started, assume it is already disconnected
if not self.__started.is_set(): return True
# Disconnect triggers deactivation of sampling events
self.__scheduler.shutdown()

            self.__netconf_handler.disconnect()
            return True
def GetInitialConfig(self) -> List[Tuple[str, Any]]:
with self.__lock:
return []
def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
chk_type('resources', resource_keys, list)
results = []
with self.__lock:
if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS
for i,resource_key in enumerate(resource_keys):
str_resource_name = 'resource_key[#{:d}]'.format(i)
try:
chk_string(str_resource_name, resource_key, allow_empty=False)
str_filter = get_filter(resource_key)

LOGGER.info('[GetConfig] str_filter = {:s}'.format(str(str_filter)))
if str_filter is None: str_filter = resource_key

xml_data = self.__netconf_handler.get(filter=str_filter).data_ele
if isinstance(xml_data, Exception): raise xml_data
results.extend(parse(resource_key, xml_data))
except Exception as e: # pylint: disable=broad-except
LOGGER.exception('Exception retrieving {:s}: {:s}'.format(str_resource_name, str(resource_key)))
results.append((resource_key, e)) # if validation fails, store the exception
return results
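    # Illustrative only (the resource key below is an assumption; valid keys are defined by the
    # templates module): GetConfig([]) retrieves ALL_RESOURCE_KEYS, while a specific subtree can be
    # requested with, e.g.:
    #
    #   driver.GetConfig(['/interfaces/interface'])   # -> [(key, value), ...] or [(key, Exception)]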
def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
chk_type('resources', resources, list)
if len(resources) == 0: return []
with self.__lock:
if self.__netconf_handler.use_candidate:
with self.__netconf_handler.locked(target='candidate'):
if self.__netconf_handler.commit_per_rule:
results = edit_config(
self.__netconf_handler, resources, target='candidate', commit_per_rule=True)
else:
results = edit_config(self.__netconf_handler, resources, target='candidate')
try:
self.__netconf_handler.commit()
except Exception as e: # pylint: disable=broad-except
                            LOGGER.exception('[SetConfig] Exception committing resources: {:s}'.format(str(resources)))
results = [e for _ in resources] # if commit fails, set exception in each resource
else:
results = edit_config(self.__netconf_handler, resources)
return results
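    # Illustrative only (assumed resource key and JSON-encoded value; the exact value format is
    # dictated by the templates module): each resource is a (resource_key, value) tuple and each
    # result is True on success or the raised Exception:
    #
    #   results = driver.SetConfig([
    #       ('/interface[eth0]', '{"name": "eth0", "enabled": true, "mtu": 1500}'),
    #   ])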
def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
chk_type('resources', resources, list)
if len(resources) == 0: return []
with self.__lock:
if self.__netconf_handler.use_candidate:
with self.__netconf_handler.locked(target='candidate'):
if self.__netconf_handler.commit_per_rule:
results = edit_config(
self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule=True)
else:
results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True)
try:
self.__netconf_handler.commit()
except Exception as e: # pylint: disable=broad-except
                            MSG = '[DeleteConfig] Exception committing resources: {:s}'
LOGGER.exception(MSG.format(str(resources)))
results = [e for _ in resources] # if commit fails, set exception in each resource
            else:
                results = edit_config(self.__netconf_handler, resources, delete=True)
return results
def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
chk_type('subscriptions', subscriptions, list)
if len(subscriptions) == 0: return []
results = []
resolver = anytree.Resolver(pathattr='name')
with self.__lock:
for i,subscription in enumerate(subscriptions):
str_subscription_name = 'subscriptions[#{:d}]'.format(i)
try:
chk_type(str_subscription_name, subscription, (list, tuple))
chk_length(str_subscription_name, subscription, min_length=3, max_length=3)
resource_key,sampling_duration,sampling_interval = subscription
chk_string(str_subscription_name + '.resource_key', resource_key, allow_empty=False)
resource_path = resource_key.split('/')
chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0)
chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0)
except Exception as e: # pylint: disable=broad-except
LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key)))
results.append(e) # if validation fails, store the exception
continue
start_date,end_date = None,None

if sampling_duration >= 1.e-12:
start_date = datetime.utcnow()
end_date = start_date + timedelta(seconds=sampling_duration)
job_id = 'k={:s}/d={:f}/i={:f}'.format(resource_key, sampling_duration, sampling_interval)
job = self.__scheduler.add_job(

do_sampling, args=(self.__samples_cache, resource_key, self.__out_samples),
kwargs={}, id=job_id, trigger='interval', seconds=sampling_interval,
start_date=start_date, end_date=end_date, timezone=pytz.utc)
subscription_path = resource_path + ['{:.3f}:{:.3f}'.format(sampling_duration, sampling_interval)]
set_subnode_value(resolver, self.__subscriptions, subscription_path, job)
results.append(True)
return results
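    # Illustrative only (hypothetical resource key): the subscription
    # ('/interface[eth0]/statistics', 60.0, 5.0) samples that subtree every 5 seconds for 60 seconds;
    # it is scheduled as job_id 'k=/interface[eth0]/statistics/d=60.000000/i=5.000000' and recorded in
    # the subscriptions tree under ['', 'interface[eth0]', 'statistics', '60.000:5.000'], which is the
    # same path UnsubscribeState() resolves to cancel it.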
def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
chk_type('subscriptions', subscriptions, list)
if len(subscriptions) == 0: return []
results = []
resolver = anytree.Resolver(pathattr='name')
with self.__lock:
for i,resource in enumerate(subscriptions):
str_subscription_name = 'resources[#{:d}]'.format(i)
try:
chk_type(str_subscription_name, resource, (list, tuple))
chk_length(str_subscription_name, resource, min_length=3, max_length=3)
resource_key,sampling_duration,sampling_interval = resource
chk_string(str_subscription_name + '.resource_key', resource_key, allow_empty=False)
resource_path = resource_key.split('/')
chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0)
chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0)
except Exception as e: # pylint: disable=broad-except
LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key)))
results.append(e) # if validation fails, store the exception
continue
subscription_path = resource_path + ['{:.3f}:{:.3f}'.format(sampling_duration, sampling_interval)]
subscription_node = get_subnode(resolver, self.__subscriptions, subscription_path)
                # if not found, subscription_node is None
if subscription_node is None:
results.append(False)
continue
job : Job = getattr(subscription_node, 'value', None)
if job is None or not isinstance(job, Job):
raise Exception('Malformed subscription node or wrong resource key: {:s}'.format(str(resource)))
job.remove()
parent = subscription_node.parent
children = list(parent.children)
children.remove(subscription_node)
parent.children = tuple(children)
results.append(True)
return results
def GetState(self, blocking=False, terminate : Optional[threading.Event] = None) -> Iterator[Tuple[str, Any]]:
while True:
if self.__terminate.is_set(): break
if terminate is not None and terminate.is_set(): break
try:
sample = self.__out_samples.get(block=blocking, timeout=0.1)
except queue.Empty:
if blocking: continue
return
if sample is None: continue
yield sample
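# Minimal usage sketch (illustrative only: the address, port, credentials, and resource key below are
# assumptions, and the settings accepted by NetconfSessionHandler depend on the deployment). Run as a
# module (python -m ...) so the relative imports resolve.
if __name__ == '__main__':
    import time
    driver = OpenConfigDriver('10.0.0.1', 830, username='admin', password='admin')
    driver.Connect()
    try:
        # Retrieve and print the full configuration view (all resource keys)
        for resource_key, value in driver.GetConfig():
            print(resource_key, value)
        # Sample a hypothetical state counter every 5 seconds for 30 seconds
        driver.SubscribeState([('/interface[eth0]/statistics', 30.0, 5.0)])
        end_time = time.time() + 35.0
        for sample in driver.GetState(blocking=True):
            print(sample)
            if time.time() > end_time: break
        driver.UnsubscribeState([('/interface[eth0]/statistics', 30.0, 5.0)])
    finally:
        driver.Disconnect()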