Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: tfs/controller
Commits on Source (6)
  • Integration fix resolution: · f691ef72
    Lluis Gifre Renom authored
    Device:
    - Fixed problem with subscription handling
  • WebUI: · ffbf7393
    Lluis Gifre Renom authored
    - Activated "Grafana" link in top menu
    - Implemented onboarding of devices + configuration from JSON descriptor files
    - Added device status in device details page
    - Created new Grafana dashboard for new PostgreSQL-based monitoring database
    - Updated deploy.sh script with correct configuration of Grafana monitoring datasource and new dashboard
  • Monitoring component: · d743ddd1
    Lluis Gifre Renom authored
    - Corrected deployment name in manifest file
    - Added resource requests and limits for monitoring server container
  • Functional tests: · 85a2d9a2
    Lluis Gifre Renom authored
    - Corrected components imported by the tests and default my_deploy.sh
  • ECOC'22 functional test: · 8806bbf9
    Lluis Gifre Renom authored
    - Added script to generate topologies and services using DC_CSGW_OLS
    - Corrected script to generate topologies and services using DC_CSGW_TN_OLS
    - Corrected resources imported by functional tests
  • Common and Device: · 10a63cc4
    Lluis Gifre Renom authored
    - Implemented generic MutexQueues class
    - Implemented sequentialization of operations in Device component to prevent data corruption and race conditions
Showing changed files with 1029 additions and 339 deletions
......@@ -247,19 +247,22 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
"url" : "monitoringservice:8812",
"database" : "monitoring",
"user" : "admin",
"password" : "quest",
"basicAuth" : false,
"basicAuth": false,
"isDefault": true,
"jsonData" : {
"sslmode" : "disable",
"postgresVersion" : 1100,
"tlsAuth" : false,
"tlsAuthWithCACert": false,
"sslmode" : "disable",
"postgresVersion" : 1100,
"maxOpenConns" : 0,
"maxIdleConns" : 2,
"connMaxLifetime" : 14400,
"tlsAuth" : false,
"tlsAuthWithCACert" : false,
"timescaledb" : false,
"tlsConfigurationMethod": "file-path",
"tlsSkipVerify": true
"tlsSkipVerify" : true
},
"secureJsonFields" : {
"password" : true
"secureJsonData": {
"password": "quest"
}
}' ${GRAFANA_URL_UPDATED}/api/datasources
echo
......@@ -267,7 +270,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
# Create Monitoring Dashboard
# Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
curl -X POST -H "Content-Type: application/json" \
-d '@src/webui/grafana_dashboard.json' \
-d '@src/webui/grafana_dashboard_psql.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
......
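For reference, the deploy.sh changes above register the PostgreSQL-backed monitoring datasource and import src/webui/grafana_dashboard_psql.json through the Grafana HTTP API. The following is a minimal, hypothetical check (not part of this merge request) that the datasource was registered: the Grafana URL is a placeholder, and the credentials assume the admin user with the TFS_GRAFANA_PASSWORD configured later in my_deploy.sh.

# Illustrative check only; GRAFANA_URL is a placeholder and the admin password
# is assumed to match TFS_GRAFANA_PASSWORD from my_deploy.sh.
import requests

GRAFANA_URL = 'http://localhost/grafana'   # hypothetical ingress path
AUTH = ('admin', 'admin123+')

response = requests.get('{:s}/api/datasources'.format(GRAFANA_URL), auth=AUTH, timeout=10)
response.raise_for_status()
print('datasources:', [datasource['name'] for datasource in response.json()])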
......@@ -51,7 +51,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: monitoringserver
name: monitoringservice
spec:
selector:
matchLabels:
......@@ -89,6 +89,13 @@ spec:
livenessProbe:
  exec:
    command: ["/bin/grpc_health_probe", "-addr=:7070"]
resources:
  requests:
    cpu: 250m
    memory: 512Mi
  limits:
    cpu: 700m
    memory: 1024Mi
---
apiVersion: v1
kind: Service
......
# Set the URL of your local Docker registry where the images will be uploaded to.
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
......@@ -6,10 +7,16 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# interdomain slice pathcomp dlt
# dbscanserving opticalattackmitigator opticalattackdetector
# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
export TFS_COMPONENTS="context device automation pathcomp service slice compute monitoring webui"
export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
# Set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE="tfs"
# Set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
# Set the new Grafana admin password
export TFS_GRAFANA_PASSWORD="admin123+"
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MutexQueues:
# ------------
# This class enables scheduling and serializing operations concurrently issued
# over a number of resources. For instance, when multiple components want to
# configure devices through the Device component, configuration operations
# have to be serialized to prevent data corruption, race conditions, etc.
#
# Usage Example:
#   class Servicer():
#       def __init__(self):
#           # init other stuff
#           self.drivers = dict()
#           self.mutex_queues = MutexQueues()
#
#       def configure_device(self, device_uuid, settings):
#           self.mutex_queues.wait_my_turn(device_uuid)
#           driver = self.drivers.get(device_uuid)
#           if driver is None:
#               driver = Driver(device_uuid)
#               self.drivers[device_uuid] = driver
#           driver.configure(settings)
#           self.mutex_queues.signal_done(device_uuid)

import threading
from queue import Queue
from typing import Dict

class MutexQueues:
    def __init__(self) -> None:
        # lock to protect dictionary updates
        self.lock = threading.Lock()

        # dictionary of queues of mutexes: queue_name => queue[mutex]
        # first mutex is the running one
        self.mutex_queues : Dict[str, Queue[threading.Event]] = dict()

    def wait_my_turn(self, queue_name : str) -> None:
        # create my mutex and enqueue it
        mutex = threading.Event()
        with self.lock:
            queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
            first_in_queue = (queue.qsize() == 0)
            queue.put_nowait(mutex)

        # if I'm the first in the queue upon addition, it means there are no
        # running tasks; directly return without waiting
        if first_in_queue: return

        # otherwise, wait for my turn in the queue
        mutex.wait()

    def signal_done(self, queue_name : str) -> None:
        # I'm done with my work
        with self.lock:
            queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())

            # remove myself from the queue
            queue.get_nowait()

            # if there are no other tasks queued, return
            if queue.qsize() == 0: return

            # otherwise, signal the next task in the queue to start
            next_mutex : threading.Event = queue.queue[0]
            next_mutex.set()
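As a sanity check, here is a minimal usage sketch (not part of the merge request; the device UUID and the simulated workload are illustrative) showing that operations queued on the same resource run strictly one after another:

import threading, time
from common.tools.mutex_queues.MutexQueues import MutexQueues   # path as used by the Device component

mutex_queues = MutexQueues()
log = []

def configure_device(device_uuid : str, setting : str) -> None:
    # block until all previously queued operations on this device have finished
    mutex_queues.wait_my_turn(device_uuid)
    try:
        log.append((device_uuid, setting, 'start'))
        time.sleep(0.1)                         # simulate a slow device operation
        log.append((device_uuid, setting, 'end'))
    finally:
        mutex_queues.signal_done(device_uuid)   # wake up the next queued operation

threads = [threading.Thread(target=configure_device, args=('R1', 'rule-{:d}'.format(i))) for i in range(3)]
for thread in threads: thread.start()
for thread in threads: thread.join()
print(log)   # start/end pairs for 'R1' never interleave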
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
......@@ -23,10 +23,15 @@ from .driver_api.DriverInstanceCache import DriverInstanceCache
from .DeviceServiceServicerImpl import DeviceServiceServicerImpl
from .MonitoringLoops import MonitoringLoops
# Custom gRPC settings
# Multiple clients might keep connections alive waiting for RPC methods to be executed.
# Requests need to be serialized to ensure correct device configurations
GRPC_MAX_WORKERS = 200
class DeviceService(GenericGrpcService):
def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name: str = __name__) -> None:
port = get_service_port_grpc(ServiceNameEnum.DEVICE)
super().__init__(port, cls_name=cls_name)
super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
self.monitoring_loops = MonitoringLoops(database)
self.device_servicer = DeviceServiceServicerImpl(database, driver_instance_cache, self.monitoring_loops)
......
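The rationale for GRPC_MAX_WORKERS = 200 is that, with the new per-device serialization, many RPCs may block in MutexQueues.wait_my_turn() at the same time, and each in-flight RPC occupies one worker thread. The sketch below shows how such a setting typically feeds a gRPC server's thread pool; it is not the actual GenericGrpcService implementation, whose internals are not part of this diff.

# Plausible sketch only; GenericGrpcService may wire this differently.
from concurrent import futures
import grpc

def build_server(port : int, max_workers : int = 200) -> grpc.Server:
    # one worker thread per in-flight RPC; a large pool lets many requests wait
    # their turn on a device without starving newly arriving calls
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    server.add_insecure_port('[::]:{:d}'.format(port))
    return server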
......@@ -24,6 +24,7 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType
from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, OperationFailedException
from common.tools.grpc.Tools import grpc_message_to_json
from common.tools.mutex_queues.MutexQueues import MutexQueues
from context.client.ContextClient import ContextClient
from .database.ConfigModel import (
ConfigModel, ConfigRuleModel, ORM_ConfigActionEnum, get_config_rules, grpc_config_rules_to_raw, update_config)
......@@ -56,6 +57,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
self.database = database
self.driver_instance_cache = driver_instance_cache
self.monitoring_loops = monitoring_loops
self.mutex_queues = MutexQueues()
LOGGER.debug('Servicer Created')
@safe_and_metered_rpc_method(METRICS, LOGGER)
......@@ -101,348 +103,368 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
json_request['device_config'] = {}
request = Device(**json_request)
sync_device_from_context(device_uuid, self.context_client, self.database)
db_device,_ = update_device_in_local_database(self.database, request)
driver_filter_fields = get_device_driver_filter_fields(db_device)
#LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules)))
address = connection_config_rules.pop('address', None)
port = connection_config_rules.pop('port', None)
settings = connection_config_rules.pop('settings', '{}')
self.mutex_queues.wait_my_turn(device_uuid)
try:
settings = json.loads(settings)
except ValueError as e:
raise InvalidArgumentException(
'device.device_config.config_rules[settings]', settings,
extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e
driver : _Driver = self.driver_instance_cache.get(
device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings)
driver.Connect()
endpoints = driver.GetConfig([RESOURCE_ENDPOINTS])
try:
for resource_key, resource_value in endpoints:
sync_device_from_context(device_uuid, self.context_client, self.database)
db_device,_ = update_device_in_local_database(self.database, request)
driver_filter_fields = get_device_driver_filter_fields(db_device)
#LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules)))
address = connection_config_rules.pop('address', None)
port = connection_config_rules.pop('port', None)
settings = connection_config_rules.pop('settings', '{}')
try:
settings = json.loads(settings)
except ValueError as e:
raise InvalidArgumentException(
'device.device_config.config_rules[settings]', settings,
extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e
driver : _Driver = self.driver_instance_cache.get(
device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings)
driver.Connect()
endpoints = driver.GetConfig([RESOURCE_ENDPOINTS])
try:
for resource_key, resource_value in endpoints:
if isinstance(resource_value, Exception):
LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value)))
continue
endpoint_uuid = resource_value.get('uuid')
endpoint_type = resource_value.get('type')
str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
db_endpoint, _ = update_or_create_object(
self.database, EndPointModel, str_endpoint_key, {
'device_fk' : db_device,
'endpoint_uuid': endpoint_uuid,
'endpoint_type': endpoint_type,
'resource_key' : resource_key,
})
sample_types : Dict[int, str] = resource_value.get('sample_types', {})
for sample_type, monitor_resource_key in sample_types.items():
str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, {
'endpoint_fk' : db_endpoint,
'resource_key' : monitor_resource_key,
'kpi_sample_type': grpc_to_enum__kpi_sample_type(sample_type),
})
except: # pylint: disable=bare-except
LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints)))
raw_running_config_rules = driver.GetConfig()
running_config_rules = []
for resource_key, resource_value in raw_running_config_rules:
if isinstance(resource_value, Exception):
LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value)))
msg = 'Error retrieving config rules: {:s} => {:s}'
LOGGER.error(msg.format(str(resource_key), str(resource_value)))
continue
endpoint_uuid = resource_value.get('uuid')
endpoint_type = resource_value.get('type')
str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
db_endpoint, _ = update_or_create_object(
self.database, EndPointModel, str_endpoint_key, {
'device_fk' : db_device,
'endpoint_uuid': endpoint_uuid,
'endpoint_type': endpoint_type,
'resource_key' : resource_key,
})
sample_types : Dict[int, str] = resource_value.get('sample_types', {})
for sample_type, monitor_resource_key in sample_types.items():
str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, {
'endpoint_fk' : db_endpoint,
'resource_key' : monitor_resource_key,
'kpi_sample_type': grpc_to_enum__kpi_sample_type(sample_type),
})
except: # pylint: disable=bare-except
LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints)))
raw_running_config_rules = driver.GetConfig()
running_config_rules = []
for resource_key, resource_value in raw_running_config_rules:
if isinstance(resource_value, Exception):
msg = 'Error retrieving config rules: {:s} => {:s}'
LOGGER.error(msg.format(str(resource_key), str(resource_value)))
continue
config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True))
running_config_rules.append(config_rule)
config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True))
running_config_rules.append(config_rule)
#for running_config_rule in running_config_rules:
# LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
update_config(self.database, device_uuid, 'running', running_config_rules)
#for running_config_rule in running_config_rules:
# LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
update_config(self.database, device_uuid, 'running', running_config_rules)
initial_config_rules = driver.GetInitialConfig()
update_config(self.database, device_uuid, 'initial', initial_config_rules)
initial_config_rules = driver.GetInitialConfig()
update_config(self.database, device_uuid, 'initial', initial_config_rules)
#LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump(
# include_config_rules=True, include_drivers=True, include_endpoints=True))))
#LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump(
# include_config_rules=True, include_drivers=True, include_endpoints=True))))
sync_device_to_context(db_device, self.context_client)
return DeviceId(**db_device.dump_id())
sync_device_to_context(db_device, self.context_client)
return DeviceId(**db_device.dump_id())
finally:
self.mutex_queues.signal_done(device_uuid)
@safe_and_metered_rpc_method(METRICS, LOGGER)
def ConfigureDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId:
device_id = request.device_id
device_uuid = device_id.device_uuid.uuid
sync_device_from_context(device_uuid, self.context_client, self.database)
self.mutex_queues.wait_my_turn(device_uuid)
try:
sync_device_from_context(device_uuid, self.context_client, self.database)
context_config_rules = get_config_rules(self.database, device_uuid, 'running')
context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules}
#LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules)))
context_config_rules = get_config_rules(self.database, device_uuid, 'running')
context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules}
#LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules)))
db_device,_ = update_device_in_local_database(self.database, request)
db_device,_ = update_device_in_local_database(self.database, request)
request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules)
#LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules)))
request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules)
#LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules)))
resources_to_set : List[Tuple[str, Any]] = [] # key, value
resources_to_delete : List[Tuple[str, Any]] = [] # key, value
resources_to_set : List[Tuple[str, Any]] = [] # key, value
resources_to_delete : List[Tuple[str, Any]] = [] # key, value
for config_rule in request_config_rules:
action, key, value = config_rule
if action == ORM_ConfigActionEnum.SET:
if (key not in context_config_rules) or (context_config_rules[key] != value):
resources_to_set.append((key, value))
elif action == ORM_ConfigActionEnum.DELETE:
if key in context_config_rules:
resources_to_delete.append((key, value))
for config_rule in request_config_rules:
action, key, value = config_rule
if action == ORM_ConfigActionEnum.SET:
if (key not in context_config_rules) or (context_config_rules[key] != value):
resources_to_set.append((key, value))
elif action == ORM_ConfigActionEnum.DELETE:
if key in context_config_rules:
resources_to_delete.append((key, value))
#LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set)))
#LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete)))
#LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set)))
#LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete)))
# TODO: use of datastores (might be virtual ones) to enable rollbacks
# TODO: use of datastores (might be virtual ones) to enable rollbacks
errors = []
errors = []
driver : _Driver = self.driver_instance_cache.get(device_uuid)
if driver is None:
errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid)))
driver : _Driver = self.driver_instance_cache.get(device_uuid)
if driver is None:
errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid)))
if len(errors) == 0:
results_setconfig = driver.SetConfig(resources_to_set)
errors.extend(check_set_errors(resources_to_set, results_setconfig))
if len(errors) == 0:
results_setconfig = driver.SetConfig(resources_to_set)
errors.extend(check_set_errors(resources_to_set, results_setconfig))
if len(errors) == 0:
results_deleteconfig = driver.DeleteConfig(resources_to_delete)
errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig))
if len(errors) == 0:
results_deleteconfig = driver.DeleteConfig(resources_to_delete)
errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig))
if len(errors) > 0:
raise OperationFailedException('ConfigureDevice', extra_details=errors)
if len(errors) > 0:
raise OperationFailedException('ConfigureDevice', extra_details=errors)
running_config_rules = driver.GetConfig()
running_config_rules = [
(ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True))
for config_rule in running_config_rules if not isinstance(config_rule[1], Exception)
]
#for running_config_rule in running_config_rules:
# LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
update_config(self.database, device_uuid, 'running', running_config_rules)
running_config_rules = driver.GetConfig()
running_config_rules = [
(ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True))
for config_rule in running_config_rules if not isinstance(config_rule[1], Exception)
]
#for running_config_rule in running_config_rules:
# LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
update_config(self.database, device_uuid, 'running', running_config_rules)
sync_device_to_context(db_device, self.context_client)
return DeviceId(**db_device.dump_id())
finally:
self.mutex_queues.signal_done(device_uuid)
sync_device_to_context(db_device, self.context_client)
return DeviceId(**db_device.dump_id())
@safe_and_metered_rpc_method(METRICS, LOGGER)
def DeleteDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty:
device_uuid = request.device_uuid.uuid
self.monitoring_loops.remove(device_uuid)
self.mutex_queues.wait_my_turn(device_uuid)
try:
self.monitoring_loops.remove(device_uuid)
sync_device_from_context(device_uuid, self.context_client, self.database)
db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
if db_device is None: return Empty()
sync_device_from_context(device_uuid, self.context_client, self.database)
db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
if db_device is None: return Empty()
self.driver_instance_cache.delete(device_uuid)
delete_device_from_context(db_device, self.context_client)
self.driver_instance_cache.delete(device_uuid)
delete_device_from_context(db_device, self.context_client)
for db_kpi_pk,_ in db_device.references(KpiModel):
db_kpi = get_object(self.database, KpiModel, db_kpi_pk)
for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel):
get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete()
db_kpi.delete()
for db_kpi_pk,_ in db_device.references(KpiModel):
db_kpi = get_object(self.database, KpiModel, db_kpi_pk)
for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel):
get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete()
db_kpi.delete()
for db_endpoint_pk,_ in db_device.references(EndPointModel):
db_endpoint = EndPointModel(self.database, db_endpoint_pk)
for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel):
get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete()
db_endpoint.delete()
for db_endpoint_pk,_ in db_device.references(EndPointModel):
db_endpoint = EndPointModel(self.database, db_endpoint_pk)
for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel):
get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete()
db_endpoint.delete()
for db_driver_pk,_ in db_device.references(DriverModel):
get_object(self.database, DriverModel, db_driver_pk).delete()
for db_driver_pk,_ in db_device.references(DriverModel):
get_object(self.database, DriverModel, db_driver_pk).delete()
db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk)
for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel):
get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk)
for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel):
get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
db_running_config = ConfigModel(self.database, db_device.device_running_config_fk)
for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel):
get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
db_running_config = ConfigModel(self.database, db_device.device_running_config_fk)
for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel):
get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
db_device.delete()
db_initial_config.delete()
db_running_config.delete()
return Empty()
db_device.delete()
db_initial_config.delete()
db_running_config.delete()
return Empty()
finally:
self.mutex_queues.signal_done(device_uuid)
@safe_and_metered_rpc_method(METRICS, LOGGER)
def GetInitialConfig(self, request : DeviceId, context : grpc.ServicerContext) -> DeviceConfig:
device_uuid = request.device_uuid.uuid
sync_device_from_context(device_uuid, self.context_client, self.database)
db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
self.mutex_queues.wait_my_turn(device_uuid)
try:
sync_device_from_context(device_uuid, self.context_client, self.database)
db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
config_rules = {} if db_device is None else db_device.dump_initial_config()
return DeviceConfig(config_rules=config_rules)
config_rules = {} if db_device is None else db_device.dump_initial_config()
device_config = DeviceConfig(config_rules=config_rules)
return device_config
finally:
self.mutex_queues.signal_done(device_uuid)
@safe_and_metered_rpc_method(METRICS, LOGGER)
def MonitorDeviceKpi(self, request : MonitoringSettings, context : grpc.ServicerContext) -> Empty:
kpi_uuid = request.kpi_id.kpi_id.uuid
device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid
self.mutex_queues.wait_my_turn(device_uuid)
try:
subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0)
if subscribe:
db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
if db_device is None:
msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_id = request.kpi_descriptor.endpoint_id
endpoint_uuid = endpoint_id.endpoint_uuid.uuid
str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
db_endpoint : EndPointModel = get_object(
self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False)
if db_endpoint is None:
msg = 'Device({:s})/EndPoint({:s}) not found. EndPointKey({:s})'.format(
str(device_uuid), str(endpoint_uuid), str(str_endpoint_key))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
driver : _Driver = self.driver_instance_cache.get(device_uuid)
if driver is None:
msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
sample_type = request.kpi_descriptor.kpi_sample_type
attributes = {
'kpi_uuid' : request.kpi_id.kpi_id.uuid,
'kpi_description' : request.kpi_descriptor.kpi_description,
'kpi_sample_type' : grpc_to_enum__kpi_sample_type(sample_type),
'device_fk' : db_device,
'endpoint_fk' : db_endpoint,
'sampling_duration': request.sampling_duration_s,
'sampling_interval': request.sampling_interval_s,
}
result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes)
db_kpi, updated = result
str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
db_endpoint_monitor : EndPointMonitorModel = get_object(
self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
if db_endpoint_monitor is None:
msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format(
str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')),
str(device_uuid), str(endpoint_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
attributes = {
'endpoint_monitor_fk': db_endpoint_monitor,
'kpi_fk' : db_kpi,
}
result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object(
self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes)
db_endpoint_monitor_kpi, updated = result
resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
resources_to_subscribe.append(
(db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
results_subscribestate = driver.SubscribeState(resources_to_subscribe)
errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate)
if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
self.monitoring_loops.add(device_uuid, driver)
subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0)
if subscribe:
device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid
db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
if db_device is None:
msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_id = request.kpi_descriptor.endpoint_id
endpoint_uuid = endpoint_id.endpoint_uuid.uuid
str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
db_endpoint : EndPointModel = get_object(
self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False)
if db_endpoint is None:
msg = 'Device({:s})/EndPoint({:s}) not found. EndPointKey({:s})'.format(
str(device_uuid), str(endpoint_uuid), str(str_endpoint_key))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
driver : _Driver = self.driver_instance_cache.get(device_uuid)
if driver is None:
msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
sample_type = request.kpi_descriptor.kpi_sample_type
attributes = {
'kpi_uuid' : request.kpi_id.kpi_id.uuid,
'kpi_description' : request.kpi_descriptor.kpi_description,
'kpi_sample_type' : grpc_to_enum__kpi_sample_type(sample_type),
'device_fk' : db_device,
'endpoint_fk' : db_endpoint,
'sampling_duration': request.sampling_duration_s,
'sampling_interval': request.sampling_interval_s,
}
result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes)
db_kpi, updated = result
str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
db_endpoint_monitor : EndPointMonitorModel = get_object(
self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
if db_endpoint_monitor is None:
msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format(
str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')),
str(device_uuid), str(endpoint_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
attributes = {
'endpoint_monitor_fk': db_endpoint_monitor,
'kpi_fk' : db_kpi,
}
result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object(
self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes)
db_endpoint_monitor_kpi, updated = result
resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
resources_to_subscribe.append(
(db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
results_subscribestate = driver.SubscribeState(resources_to_subscribe)
errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate)
if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
self.monitoring_loops.add(device_uuid, driver)
else:
db_kpi : KpiModel = get_object(
self.database, KpiModel, kpi_uuid, raise_if_not_found=False)
if db_kpi is None:
msg = 'Kpi({:s}) not found'.format(str(kpi_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
db_device : DeviceModel = get_object(
self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False)
if db_device is None:
msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
device_uuid = db_device.device_uuid
db_endpoint : EndPointModel = get_object(
self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False)
if db_endpoint is None:
msg = 'EndPoint({:s}) not found'.format(str(db_kpi.endpoint_fk))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_uuid = db_endpoint.endpoint_uuid
str_endpoint_key = db_endpoint.pk
kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type
sample_type = kpi_sample_type.value
str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
db_endpoint_monitor : EndPointMonitorModel = get_object(
self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
if db_endpoint_monitor is None:
msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object(
self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False)
if db_endpoint_monitor_kpi is None:
msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
resources_to_unsubscribe.append(
(db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
driver : _Driver = self.driver_instance_cache.get(device_uuid)
if driver is None:
msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe)
errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate)
if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
db_endpoint_monitor_kpi.delete()
db_kpi.delete()
# There is one monitoring loop per device; keep them active since they are re-used by different monitoring
# requests.
#self.monitoring_loops.remove(device_uuid)
# Subscriptions are not stored as classical driver config.
# TODO: consider adding it somehow in the configuration.
# Warning: GetConfig might be very slow in OpenConfig devices
#running_config_rules = [
# (config_rule[0], json.dumps(config_rule[1], sort_keys=True))
# for config_rule in driver.GetConfig()
#]
#context_config_rules = {
# config_rule[1]: config_rule[2]
# for config_rule in get_config_rules(self.database, device_uuid, 'running')
#}
## each in context, not in running => delete in context
## each in running, not in context => add to context
## each in context and in running, context.value != running.value => update in context
#running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = []
#for config_rule_key,config_rule_value in running_config_rules:
# running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value))
# context_config_rules.pop(config_rule_key, None)
#for context_rule_key,context_rule_value in context_config_rules.items():
# running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value))
##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}'
##for i,running_config_rules_action in enumerate(running_config_rules_actions):
## LOGGER.info(msg.format(i, str(running_config_rules_action)))
#update_config(self.database, device_uuid, 'running', running_config_rules_actions)
sync_device_to_context(db_device, self.context_client)
return Empty()
else:
db_kpi : KpiModel = get_object(
self.database, KpiModel, kpi_uuid, raise_if_not_found=False)
if db_kpi is None:
msg = 'Kpi({:s}) not found'.format(str(kpi_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
db_device : DeviceModel = get_object(
self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False)
if db_device is None:
msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
device_uuid = db_device.device_uuid
db_endpoint : EndPointModel = get_object(
self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False)
if db_endpoint is None:
msg = 'EndPoint({:s}) not found'.format(str(db_kpi.endpoint_fk))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_uuid = db_endpoint.endpoint_uuid
str_endpoint_key = db_endpoint.pk
kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type
sample_type = kpi_sample_type.value
str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
db_endpoint_monitor : EndPointMonitorModel = get_object(
self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
if db_endpoint_monitor is None:
msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object(
self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False)
if db_endpoint_monitor_kpi is None:
msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
resources_to_unsubscribe.append(
(db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
driver : _Driver = self.driver_instance_cache.get(device_uuid)
if driver is None:
msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe)
errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate)
if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
db_endpoint_monitor_kpi.delete()
db_kpi.delete()
# There is one monitoring loop per device; keep them active since they are re-used by different monitoring
# requests.
#self.monitoring_loops.remove(device_uuid)
# Subscriptions are not stored as classical driver config.
# TODO: consider adding it somehow in the configuration.
# Warning: GetConfig might be very slow in OpenConfig devices
#running_config_rules = [
# (config_rule[0], json.dumps(config_rule[1], sort_keys=True))
# for config_rule in driver.GetConfig()
#]
#context_config_rules = {
# config_rule[1]: config_rule[2]
# for config_rule in get_config_rules(self.database, device_uuid, 'running')
#}
## each in context, not in running => delete in context
## each in running, not in context => add to context
## each in context and in running, context.value != running.value => update in context
#running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = []
#for config_rule_key,config_rule_value in running_config_rules:
# running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value))
# context_config_rules.pop(config_rule_key, None)
#for context_rule_key,context_rule_value in context_config_rules.items():
# running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value))
##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}'
##for i,running_config_rules_action in enumerate(running_config_rules_actions):
## LOGGER.info(msg.format(i, str(running_config_rules_action)))
#update_config(self.database, device_uuid, 'running', running_config_rules_actions)
sync_device_to_context(db_device, self.context_client)
return Empty()
finally:
self.mutex_queues.signal_done(device_uuid)
......@@ -75,6 +75,5 @@ def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc
str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)])
update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, {
'endpoint_fk' : db_endpoint,
'resource_key' : '', # during initialization, allow empty value
'kpi_sample_type': orm_kpi_sample_type,
})
......@@ -2,7 +2,7 @@
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device service automation pathcomp slice compute webui"
export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
......
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, uuid
from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
from common.tools.object_factory.Context import json_context, json_context_id
from common.tools.object_factory.Device import (
json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
json_device_emulated_packet_router_disabled, json_device_emulated_tapi_disabled, json_device_id)
from common.tools.object_factory.EndPoint import json_endpoints
from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
from common.tools.object_factory.Topology import json_topology, json_topology_id
# if true, Device component is present and will infer the endpoints from connect-rules
# if false, Device component is not present and device objects must contain preconfigured endpoints
ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'True')
ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'}
def compose_router(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
return device_id, endpoints, device
def compose_ols(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'optical', []) for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
device = json_device_emulated_tapi_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
return device_id, endpoints, device
def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
return device_id, endpoints, device
def compose_link(endpoint_a, endpoint_z):
link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
link_id = json_link_id(link_uuid)
link = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']])
return link_id, link
def compose_service(endpoint_a, endpoint_z, constraints=[]):
service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]
service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints)
return service
# ----- Context --------------------------------------------------------------------------------------------------------
CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
CONTEXT = json_context(DEFAULT_CONTEXT_UUID)
# ----- Domains --------------------------------------------------------------------------------------------------------
# Overall network topology
TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
# DataCenter #1 Network
TOPO_DC1_UUID = 'DC1'
TOPO_DC1_ID = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID)
TOPO_DC1 = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID)
# DataCenter #2 Network
TOPO_DC2_UUID = 'DC2'
TOPO_DC2_ID = json_topology_id(TOPO_DC2_UUID, context_id=CONTEXT_ID)
TOPO_DC2 = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID)
# CellSite #1 Network
TOPO_CS1_UUID = 'CS1'
TOPO_CS1_ID = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID)
TOPO_CS1 = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID)
# CellSite #2 Network
TOPO_CS2_UUID = 'CS2'
TOPO_CS2_ID = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID)
TOPO_CS2 = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID)
# Transport Network
TOPO_TN_UUID = 'TN'
TOPO_TN_ID = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID)
TOPO_TN = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID)
# ----- Devices --------------------------------------------------------------------------------------------------------
# DataCenters
DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int'])
DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int'])
# CellSites
DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1'])
DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1'])
DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1'])
DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1'])
# Transport Network
tols_ep_uuids = [str(uuid.uuid4()).split('-')[-1] for _ in range(4)]
DEV_TOLS_ID, DEV_TOLS_EPS, DEV_TOLS = compose_ols('TN-OLS', tols_ep_uuids)
# ----- Links ----------------------------------------------------------------------------------------------------------
# InterDomain DC-CSGW
LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0])
LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0])
LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
# InterDomain CSGW-TN
LINK_CS1GW1_TOLS_ID, LINK_CS1GW1_TOLS = compose_link(DEV_CS1GW1_EPS[1], DEV_TOLS_EPS[0])
LINK_CS1GW2_TOLS_ID, LINK_CS1GW2_TOLS = compose_link(DEV_CS1GW2_EPS[1], DEV_TOLS_EPS[1])
LINK_CS2GW1_TOLS_ID, LINK_CS2GW1_TOLS = compose_link(DEV_CS2GW1_EPS[1], DEV_TOLS_EPS[2])
LINK_CS2GW2_TOLS_ID, LINK_CS2GW2_TOLS = compose_link(DEV_CS2GW2_EPS[1], DEV_TOLS_EPS[3])
# ----- WIM Service Settings -------------------------------------------------------------------------------------------
WIM_USERNAME = 'admin'
WIM_PASSWORD = 'admin'
def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]):
ce_endpoint_id = ce_endpoint_id['endpoint_id']
ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
pe_device_uuid = pe_device_id['device_uuid']['uuid']
service_endpoint_id = '{:s}:{:s}:{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid)
bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
_mapping = {
'service_endpoint_id': service_endpoint_id,
'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
'service_mapping_info': {
'site-id': site_id,
'bearer': {'bearer-reference': bearer},
}
}
if priority is not None: _mapping['service_mapping_info']['priority'] = priority
if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant
return service_endpoint_id, _mapping
WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1:DC1-GW:eth2'])
WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1:DC1-GW:eth1'])
WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2:DC2-GW:eth2'])
WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2:DC2-GW:eth1'])
WIM_MAPPING = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC]
WIM_SRV_VLAN_ID = 300
WIM_SERVICE_TYPE = 'ELAN'
WIM_SERVICE_CONNECTION_POINTS = [
{'service_endpoint_id': WIM_SEP_DC1_PRI,
'service_endpoint_encapsulation_type': 'dot1q',
'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
{'service_endpoint_id': WIM_SEP_DC2_PRI,
'service_endpoint_encapsulation_type': 'dot1q',
'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
]
# ----- Containers -----------------------------------------------------------------------------------------------------
CONTEXTS = [ CONTEXT ]
TOPOLOGIES = [ TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ]
DEVICES = [ DEV_DC1GW, DEV_DC2GW,
DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2,
DEV_TOLS,
]
LINKS = [ LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
LINK_CS1GW1_TOLS, LINK_CS1GW2_TOLS, LINK_CS2GW1_TOLS, LINK_CS2GW2_TOLS,
]
OBJECTS_PER_TOPOLOGY = [
(TOPO_ADMIN_ID,
[DEV_DC1GW_ID, DEV_DC2GW_ID, DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID, DEV_TOLS_ID],
[LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID],
),
(TOPO_DC1_ID,
[DEV_DC1GW_ID],
[]),
(TOPO_DC2_ID,
[DEV_DC2GW_ID],
[]),
(TOPO_CS1_ID,
[DEV_CS1GW1_ID, DEV_CS1GW2_ID],
[]),
(TOPO_CS2_ID,
[DEV_CS2GW1_ID, DEV_CS2GW2_ID],
[]),
(TOPO_TN_ID,
[DEV_TOLS_ID],
[]),
]
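The containers above (CONTEXTS, TOPOLOGIES, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY) are what the functional tests below import to onboard the ECOC'22 scenario. Roughly, onboarding pushes each object through the Context and Device clients, as in the hypothetical sketch below; AddDevice appears elsewhere in this diff, but the other RPC names and the proto import path are assumptions.

# Hypothetical onboarding loop; only AddDevice is confirmed by this diff.
from common.proto.context_pb2 import Context, Device, Link, Topology   # assumed module path
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient

def onboard(context_client : ContextClient, device_client : DeviceClient) -> None:
    for context in CONTEXTS: context_client.SetContext(Context(**context))        # assumed RPC name
    for topology in TOPOLOGIES: context_client.SetTopology(Topology(**topology))  # assumed RPC name
    # AddDevice connects to the (emulated) device and, when connect-rules are
    # present, infers the endpoints automatically
    for device in DEVICES: device_client.AddDevice(Device(**device))
    for link in LINKS: context_client.SetLink(Link(**link))                       # assumed RPC name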
......@@ -25,7 +25,7 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
# if true, Device component is present and will infer the endpoints from connect-rules
# if false, Device component is not present and device objects must contain preconfigured endpoints
ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'False')
ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'True')
ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'}
def compose_router(device_uuid, endpoint_uuids, topology_id=None):
......
......@@ -18,8 +18,9 @@ from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from .Fixtures import context_client, device_client
#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
#from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
from .Objects_DC_CSGW_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
LOGGER = logging.getLogger(__name__)
......
......@@ -19,8 +19,9 @@ from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from .Fixtures import context_client, device_client
#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
#from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
from .Objects_DC_CSGW_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
LOGGER = logging.getLogger(__name__)
......
......@@ -20,10 +20,13 @@ from context.client.ContextClient import ContextClient
from .Fixtures import context_client, osm_wim
#from .Objects_BigNet import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_TN import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN_OLS import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_OLS import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
......
......@@ -26,10 +26,12 @@ from context.client.ContextClient import ContextClient
from .Fixtures import context_client, osm_wim
#from .Objects_BigNet import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_TN import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN_OLS import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_TN_OLS import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
......
......@@ -2,7 +2,7 @@
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device automation service compute monitoring webui"
export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
......
{"overwrite": true, "folderId": 0, "dashboard":
{
"id": null,
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"iteration": 1664814762635,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "always",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": ".*PACKETS_.*"
},
"properties": [
{
"id": "custom.axisPlacement",
"value": "left"
},
{
"id": "unit",
"value": "pps"
},
{
"id": "custom.axisLabel",
"value": "Packets / sec"
},
{
"id": "custom.axisSoftMin",
"value": 0
}
]
},
{
"matcher": {
"id": "byRegexp",
"options": ".*BYTES_.*"
},
"properties": [
{
"id": "custom.axisPlacement",
"value": "right"
},
{
"id": "unit",
"value": "Bps"
},
{
"id": "custom.axisLabel",
"value": "Bytes / sec"
},
{
"id": "custom.axisSoftMin",
"value": 0
}
]
}
]
},
"gridPos": {
"h": 19,
"w": 24,
"x": 0,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [
"first",
"min",
"mean",
"max",
"lastNotNull"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"format": "time_series",
"group": [],
"hide": false,
"metricColumn": "kpi_value",
"rawQuery": true,
"rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_id, endpoint_id, kpi_sample_type\r\nFROM\r\n monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_id IN ($device_id) AND endpoint_id IN ($endpoint_id) AND kpi_sample_type IN ($kpi_sample_type)\r\nGROUP BY\r\n device_id, endpoint_id, kpi_sample_type\r\nORDER BY\r\n timestamp\r\n",
"refId": "A",
"select": [
[
{
"params": [
"kpi_value"
],
"type": "column"
}
]
],
"table": "monitoring",
"timeColumn": "timestamp",
"where": [
{
"name": "",
"params": [
"device_id",
"IN",
"$device_id"
],
"type": "expression"
}
]
}
],
"title": "L3 Monitoring Packets/Bytes Received/Sent",
"transformations": [
{
"id": "renameByRegex",
"options": {
"regex": "metric {device_id=\\\"([^\\\"]+)\\\", endpoint_id=\\\"([^\\\"]+)\\\", kpi_sample_type=\\\"([^\\\"]+)\\\"}",
"renamePattern": "$3 ($1 $2)"
}
}
],
"type": "timeseries"
}
],
"refresh": "5s",
"schemaVersion": 36,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"definition": "SELECT DISTINCT device_id FROM monitoring;",
"hide": 0,
"includeAll": true,
"label": "Device",
"multi": true,
"name": "device_id",
"options": [],
"query": "SELECT DISTINCT device_id FROM monitoring;",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": false,
"text": "All",
"value": "$__all"
},
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"definition": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
"hide": 0,
"includeAll": true,
"label": "EndPoint",
"multi": true,
"name": "endpoint_id",
"options": [],
"query": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": true,
"text": [
"PACKETS_RECEIVED",
"PACKETS_TRANSMITTED"
],
"value": [
"PACKETS_RECEIVED",
"PACKETS_TRANSMITTED"
]
},
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"definition": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
"hide": 0,
"includeAll": true,
"label": "Kpi Sample Type",
"multi": true,
"name": "kpi_sample_type",
"options": [],
"query": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-15m",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "L3 Monitoring",
"uid": "tf-l3-monit",
"version": 1,
"weekStart": ""
}
}
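For reference, the renameByRegex transformation above rewrites the raw series labels returned by the PostgreSQL datasource into compact legend entries. A minimal Python sketch of the same substitution (the device, endpoint and sample-type values are illustrative only):

import re

# Mimics the dashboard's renameByRegex transformation on an illustrative series label.
REGEX   = r'metric \{device_id="([^"]+)", endpoint_id="([^"]+)", kpi_sample_type="([^"]+)"\}'
PATTERN = r'\3 (\1 \2)'

label = 'metric {device_id="R1", endpoint_id="EP1", kpi_sample_type="PACKETS_RECEIVED"}'
print(re.sub(REGEX, PATTERN, label))   # prints: PACKETS_RECEIVED (R1 EP1)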
@@ -13,6 +13,7 @@
# limitations under the License.
import copy, json, logging
from typing import Optional
from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
from common.proto.context_pb2 import Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList
from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -43,9 +44,10 @@ ENTITY_TO_TEXT = {
}
ACTION_TO_TEXT = {
# action => infinitive, past
'add' : ('Add', 'Added'),
'update' : ('Update', 'Updated'),
# action => infinitive, past
'add' : ('Add', 'Added'),
'update' : ('Update', 'Updated'),
'config' : ('Configure', 'Configured'),
}
def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities):
@@ -94,14 +96,14 @@ def process_descriptors(descriptors):
topology['device_ids'] = []
topology['link_ids'] = []
process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add)
process_descriptor('device', 'add', context_client.SetDevice, Device, devices )
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', context_client.SetService, Service, services )
process_descriptor('context', 'update', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies )
process_descriptor('slice', 'add', context_client.SetSlice, Slice, slices )
process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add)
process_descriptor('device', 'add', context_client.SetDevice, Device, devices )
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', context_client.SetService, Service, services )
process_descriptor('context', 'update', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies )
process_descriptor('slice', 'add', context_client.SetSlice, Slice, slices )
process_descriptor('connection', 'add', context_client.SetConnection, Connection, connections )
context_client.close()
return
@@ -111,6 +113,28 @@ def process_descriptors(descriptors):
# in normal mode, connections should not be set
assert len(connections) == 0
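# Split each device descriptor in two: the '_connect/*' rules that AddDevice needs to reach the device,
# and the remaining config rules, which are applied afterwards through ConfigureDevice.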
devices_add = []
devices_config = []
for device in devices:
connect_rules = []
config_rules = []
for config_rule in device.get('device_config', {}).get('config_rules', []):
custom_resource_key : Optional[str] = config_rule.get('custom', {}).get('resource_key')
if custom_resource_key is not None and custom_resource_key.startswith('_connect/'):
connect_rules.append(config_rule)
else:
config_rules.append(config_rule)
if len(connect_rules) > 0:
device_add = copy.deepcopy(device)
device_add['device_endpoints'] = []
device_add['device_config'] = {'config_rules': connect_rules}
devices_add.append(device_add)
if len(config_rules) > 0:
device['device_config'] = {'config_rules': config_rules}
devices_config.append(device)
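# Services and slices follow a similar two-step pattern below: CreateService/CreateSlice with the *_add copies,
# then UpdateService/UpdateSlice with the full descriptors.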
services_add = []
for service in services:
service_copy = copy.deepcopy(service)
@@ -132,14 +156,15 @@ def process_descriptors(descriptors):
service_client.connect()
slice_client.connect()
process_descriptor('context', 'add', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies )
process_descriptor('device', 'add', device_client .AddDevice, Device, devices )
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', service_client.CreateService, Service, services_add)
process_descriptor('service', 'update', service_client.UpdateService, Service, services )
process_descriptor('slice', 'add', slice_client.CreateSlice, Slice, slices_add )
process_descriptor('slice', 'update', slice_client.UpdateSlice, Slice, slices )
process_descriptor('context', 'add', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies )
process_descriptor('device', 'add', device_client .AddDevice, Device, devices_add )
process_descriptor('device', 'config', device_client .ConfigureDevice, Device, devices_config)
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', service_client.CreateService, Service, services_add )
process_descriptor('service', 'update', service_client.UpdateService, Service, services )
process_descriptor('slice', 'add', slice_client .CreateSlice, Slice, slices_add )
process_descriptor('slice', 'update', slice_client .UpdateSlice, Slice, slices )
slice_client.close()
service_client.close()
......
@@ -83,9 +83,9 @@
<a class="nav-link" href="{{ url_for('slice.home') }}">Slice</a>
{% endif %}
</li>
<!--<li class="nav-item">
<li class="nav-item">
<a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
</li>-->
</li>
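<!-- Assumes the deployment exposes Grafana under the /grafana path (e.g., through the WebUI ingress); adjust if Grafana is published elsewhere. -->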
<li class="nav-item">
<a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
......
@@ -45,6 +45,7 @@
<div class="col-sm-4">
<b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br><br>
<b>Type: </b>{{ device.device_type }}<br><br>
<b>Status: </b> {{ dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br>
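{# 'dose' is presumably the DeviceOperationalStatusEnum made available to the template context; Name() returns e.g. 'DEVICEOPERATIONALSTATUS_ENABLED', trimmed here to 'ENABLED'. #}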
<b>Drivers: </b>
<ul>
{% for driver in device.device_drivers %}
......