
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: tfs/controller
Commits on Source (6)
  • Integration fix resolution: · f691ef72
    Lluis Gifre Renom authored
    Device:
    - fixed problem with subscription handling
  • WebUI: · ffbf7393
    Lluis Gifre Renom authored
    - Activated "Grafana" link in top menu
    - Implemented onboarding of devices + configuration from JSON descriptor files
    - added device status in device details page
    - created new Grafana dashboard for new PostgreSQL-based monitoring database
    - updated deploy.sh script with correct configuration of Grafana monitoring datasource and new dashboard
  • Monitoring component: · d743ddd1
    Lluis Gifre Renom authored
    - corrected deployment name in manifest file
    - added resource requests and limits for monitoring server container
  • Functional tests: · 85a2d9a2
    Lluis Gifre Renom authored
    - corrected components imported by the tests and default my_deploy.sh
  • ECOC'22 functional test: · 8806bbf9
    Lluis Gifre Renom authored
    - Added script to generate topologies and services using DC_CSGW_OLS
    - Corrected script to generate topologies and services using DC_CSGW_TN_OLS
    - Corrected resources imported by functional tests
  • Common and Device: · 10a63cc4
    Lluis Gifre Renom authored
    - Implemented generic MutexQueues class
    - Implemented sequentialization of operations in Device component to prevent data corruption and race conditions
Showing changed files with 1029 additions and 339 deletions
......@@ -247,19 +247,22 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
"url" : "monitoringservice:8812",
"database" : "monitoring",
"user" : "admin",
"password" : "quest",
"basicAuth" : false,
"basicAuth": false,
"isDefault": true,
"jsonData" : {
"sslmode" : "disable",
"postgresVersion" : 1100,
"tlsAuth" : false,
"tlsAuthWithCACert": false,
"sslmode" : "disable",
"postgresVersion" : 1100,
"maxOpenConns" : 0,
"maxIdleConns" : 2,
"connMaxLifetime" : 14400,
"tlsAuth" : false,
"tlsAuthWithCACert" : false,
"timescaledb" : false,
"tlsConfigurationMethod": "file-path",
"tlsSkipVerify": true
"tlsSkipVerify" : true
},
"secureJsonFields" : {
"password" : true
"secureJsonData": {
"password": "quest"
}
}' ${GRAFANA_URL_UPDATED}/api/datasources
echo
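Note on the hunk above: the PostgreSQL password is moved from a plain "password" field (and the read-only "secureJsonFields" block) into "secureJsonData", which is the field Grafana's datasource API expects when creating or updating a datasource, and connection-pool settings (maxOpenConns, maxIdleConns, connMaxLifetime) plus isDefault/timescaledb are added. A minimal sketch to double-check the result after deployment, assuming Grafana is reachable at the URL and admin credentials exported by the deploy script (values below are illustrative):

```python
# Minimal check (not part of the commit): list datasources through Grafana's HTTP API
# and confirm the PostgreSQL monitoring datasource is registered.
# URL and credentials are assumptions; use the values exported by the deploy script.
import requests

GRAFANA_URL = 'http://admin:admin123+@localhost/grafana'  # assumed URL/credentials

response = requests.get(f'{GRAFANA_URL}/api/datasources')
response.raise_for_status()
for datasource in response.json():
    print(datasource.get('name'), datasource.get('type'), datasource.get('url'))
```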
......@@ -267,7 +270,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
# Create Monitoring Dashboard
# Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
curl -X POST -H "Content-Type: application/json" \
-d '@src/webui/grafana_dashboard.json' \
-d '@src/webui/grafana_dashboard_psql.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
......
......@@ -51,7 +51,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: monitoringserver
name: monitoringservice
spec:
selector:
matchLabels:
......@@ -89,6 +89,13 @@ spec:
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:7070"]
resources:
requests:
cpu: 250m
memory: 512Mi
limits:
cpu: 700m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......
# Set the URL of your local Docker registry where the images will be uploaded to.
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
......@@ -6,10 +7,16 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# interdomain slice pathcomp dlt
# dbscanserving opticalattackmitigator opticalattackdetector
# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
export TFS_COMPONENTS="context device automation pathcomp service slice compute monitoring webui"
export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
# Set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE="tfs"
# Set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
# Set the new Grafana admin password
export TFS_GRAFANA_PASSWORD="admin123+"
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MutexQueues:
# ------------
# This class enables scheduling and serializing operations concurrently issued
# over a number of resources. For instance, when multiple components want to
# configure devices through the Device component, configuration operations
# have to be serialized to prevent data corruption, race conditions, etc.
# Usage Example:
# class Servicer():
# def __init__(self):
# # init other stuff
# self.drivers = dict()
# self.mutex_queues = MutexQueues()
#
# def configure_device(self, device_uuid, settings):
# self.mutex_queues.wait_my_turn(device_uuid)
# driver = self.drivers.get(device_uuid)
# if driver is None:
# driver = Driver(device_uuid)
# self.drivers[device_uuid] = driver
# driver.configure(settings)
# self.mutex_queues.signal_done(device_uuid)
import threading
from queue import Queue
from typing import Dict
class MutexQueues:
def __init__(self) -> None:
# lock to protect dictionary updates
self.lock = threading.Lock()
# dictionary of queues of mutexes: queue_name => queue[mutex]
# first mutex is the running one
self.mutex_queues : Dict[str, Queue[threading.Event]] = dict()
def wait_my_turn(self, queue_name : str) -> None:
# create my mutex and enqueue it
mutex = threading.Event()
with self.lock:
queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
first_in_queue = (queue.qsize() == 0)
queue.put_nowait(mutex)
# if I'm the first in the queue upon addition, it means there are no running tasks;
# directly return without waiting
if first_in_queue: return
# otherwise, wait for my turn in the queue
mutex.wait()
def signal_done(self, queue_name : str) -> None:
# I'm done with my work
with self.lock:
queue : Queue = self.mutex_queues.setdefault(queue_name, Queue())
# remove myself from the queue
queue.get_nowait()
# if there are no other tasks queued, return
if queue.qsize() == 0: return
# otherwise, signal the next task in the queue to start
next_mutex : threading.Event = queue.queue[0]
next_mutex.set()
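MutexQueues serializes per-key critical sections: the first caller for a given key runs immediately, later callers enqueue a threading.Event and block on it until the previous caller signals completion. A minimal usage sketch (illustrative only, not part of the commit):

```python
# Illustrative sketch: two threads configuring the same device UUID are serialized,
# while operations on a different device UUID proceed in parallel.
import threading, time

mutex_queues = MutexQueues()

def configure(device_uuid : str, setting : str) -> None:
    mutex_queues.wait_my_turn(device_uuid)
    try:
        time.sleep(0.1)  # simulate a slow configuration operation
        print('configured', device_uuid, 'with', setting)
    finally:
        mutex_queues.signal_done(device_uuid)

threads = [
    threading.Thread(target=configure, args=('dev-1', 'rule-a')),
    threading.Thread(target=configure, args=('dev-1', 'rule-b')),  # waits for rule-a
    threading.Thread(target=configure, args=('dev-2', 'rule-c')),  # runs concurrently
]
for t in threads: t.start()
for t in threads: t.join()
```

Wrapping the work in try/finally ensures signal_done is always called, even if the operation raises; otherwise later callers for that key would block forever.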
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
......@@ -23,10 +23,15 @@ from .driver_api.DriverInstanceCache import DriverInstanceCache
from .DeviceServiceServicerImpl import DeviceServiceServicerImpl
from .MonitoringLoops import MonitoringLoops
# Custom gRPC settings
# Multiple clients might keep connections alive waiting for RPC methods to be executed.
# Requests need to be serialized to ensure correct device configuration
GRPC_MAX_WORKERS = 200
class DeviceService(GenericGrpcService):
def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name: str = __name__) -> None:
port = get_service_port_grpc(ServiceNameEnum.DEVICE)
super().__init__(port, cls_name=cls_name)
super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
self.monitoring_loops = MonitoringLoops(database)
self.device_servicer = DeviceServiceServicerImpl(database, driver_instance_cache, self.monitoring_loops)
......
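Raising the worker count to 200 matters because each blocked RPC occupies a thread from the server's pool: with the new per-device serialization, many ConfigureDevice calls may sit in MutexQueues waiting their turn, and a small pool would starve unrelated requests. A rough sketch of what the max_workers value ultimately controls (GenericGrpcService's internals are assumed here; this is the standard grpcio pattern):

```python
# Sketch of the standard grpcio server setup that a max_workers parameter feeds into.
# GenericGrpcService presumably does something equivalent internally.
from concurrent import futures
import grpc

GRPC_MAX_WORKERS = 200  # many clients may hold connections while waiting for their turn

server = grpc.server(futures.ThreadPoolExecutor(max_workers=GRPC_MAX_WORKERS))
# add_DeviceServiceServicer_to_server(device_servicer, server)  # generated gRPC binding
server.add_insecure_port('[::]:2020')  # port value is illustrative
server.start()
server.wait_for_termination()
```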
......@@ -75,6 +75,5 @@ def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc
str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)])
update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, {
'endpoint_fk' : db_endpoint,
'resource_key' : '', # during initialization, allow empty value
'kpi_sample_type': orm_kpi_sample_type,
})
......@@ -2,7 +2,7 @@
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device service automation pathcomp slice compute webui"
export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
......
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, uuid
from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
from common.tools.object_factory.Context import json_context, json_context_id
from common.tools.object_factory.Device import (
json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
json_device_emulated_packet_router_disabled, json_device_emulated_tapi_disabled, json_device_id)
from common.tools.object_factory.EndPoint import json_endpoints
from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
from common.tools.object_factory.Topology import json_topology, json_topology_id
# if true, Device component is present and will infer the endpoints from connect-rules
# if false, Device component is not present and device objects must contain preconfigured endpoints
ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'True')
ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'}
def compose_router(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
return device_id, endpoints, device
def compose_ols(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'optical', []) for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
device = json_device_emulated_tapi_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
return device_id, endpoints, device
def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints)
return device_id, endpoints, device
def compose_link(endpoint_a, endpoint_z):
link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
link_id = json_link_id(link_uuid)
link = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']])
return link_id, link
def compose_service(endpoint_a, endpoint_z, constraints=[]):
service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]
service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints)
return service
# ----- Context --------------------------------------------------------------------------------------------------------
CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
CONTEXT = json_context(DEFAULT_CONTEXT_UUID)
# ----- Domains --------------------------------------------------------------------------------------------------------
# Overall network topology
TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
# DataCenter #1 Network
TOPO_DC1_UUID = 'DC1'
TOPO_DC1_ID = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID)
TOPO_DC1 = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID)
# DataCenter #2 Network
TOPO_DC2_UUID = 'DC2'
TOPO_DC2_ID = json_topology_id(TOPO_DC2_UUID, context_id=CONTEXT_ID)
TOPO_DC2 = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID)
# CellSite #1 Network
TOPO_CS1_UUID = 'CS1'
TOPO_CS1_ID = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID)
TOPO_CS1 = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID)
# CellSite #2 Network
TOPO_CS2_UUID = 'CS2'
TOPO_CS2_ID = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID)
TOPO_CS2 = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID)
# Transport Network Network
TOPO_TN_UUID = 'TN'
TOPO_TN_ID = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID)
TOPO_TN = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID)
# ----- Devices --------------------------------------------------------------------------------------------------------
# DataCenters
DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int'])
DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int'])
# CellSites
DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1'])
DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1'])
DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1'])
DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1'])
# Transport Network
tols_ep_uuids = [str(uuid.uuid4()).split('-')[-1] for _ in range(4)]
DEV_TOLS_ID, DEV_TOLS_EPS, DEV_TOLS = compose_ols('TN-OLS', tols_ep_uuids)
# ----- Links ----------------------------------------------------------------------------------------------------------
# InterDomain DC-CSGW
LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0])
LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0])
LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])
# InterDomain CSGW-TN
LINK_CS1GW1_TOLS_ID, LINK_CS1GW1_TOLS = compose_link(DEV_CS1GW1_EPS[1], DEV_TOLS_EPS[0])
LINK_CS1GW2_TOLS_ID, LINK_CS1GW2_TOLS = compose_link(DEV_CS1GW2_EPS[1], DEV_TOLS_EPS[1])
LINK_CS2GW1_TOLS_ID, LINK_CS2GW1_TOLS = compose_link(DEV_CS2GW1_EPS[1], DEV_TOLS_EPS[2])
LINK_CS2GW2_TOLS_ID, LINK_CS2GW2_TOLS = compose_link(DEV_CS2GW2_EPS[1], DEV_TOLS_EPS[3])
# ----- WIM Service Settings -------------------------------------------------------------------------------------------
WIM_USERNAME = 'admin'
WIM_PASSWORD = 'admin'
def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]):
ce_endpoint_id = ce_endpoint_id['endpoint_id']
ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid']
ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid']
pe_device_uuid = pe_device_id['device_uuid']['uuid']
service_endpoint_id = '{:s}:{:s}:{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid)
bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid)
_mapping = {
'service_endpoint_id': service_endpoint_id,
'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid,
'service_mapping_info': {
'site-id': site_id,
'bearer': {'bearer-reference': bearer},
}
}
if priority is not None: _mapping['service_mapping_info']['priority'] = priority
if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant
return service_endpoint_id, _mapping
WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1:DC1-GW:eth2'])
WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1:DC1-GW:eth1'])
WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2:DC2-GW:eth2'])
WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2:DC2-GW:eth1'])
WIM_MAPPING = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC]
WIM_SRV_VLAN_ID = 300
WIM_SERVICE_TYPE = 'ELAN'
WIM_SERVICE_CONNECTION_POINTS = [
{'service_endpoint_id': WIM_SEP_DC1_PRI,
'service_endpoint_encapsulation_type': 'dot1q',
'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
{'service_endpoint_id': WIM_SEP_DC2_PRI,
'service_endpoint_encapsulation_type': 'dot1q',
'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}},
]
# ----- Containers -----------------------------------------------------------------------------------------------------
CONTEXTS = [ CONTEXT ]
TOPOLOGIES = [ TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ]
DEVICES = [ DEV_DC1GW, DEV_DC2GW,
DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2,
DEV_TOLS,
]
LINKS = [ LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
LINK_CS1GW1_TOLS, LINK_CS1GW2_TOLS, LINK_CS2GW1_TOLS, LINK_CS2GW2_TOLS,
]
OBJECTS_PER_TOPOLOGY = [
(TOPO_ADMIN_ID,
[DEV_DC1GW_ID, DEV_DC2GW_ID, DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID, DEV_TOLS_ID],
[LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID],
),
(TOPO_DC1_ID,
[DEV_DC1GW_ID],
[]),
(TOPO_DC2_ID,
[DEV_DC2GW_ID],
[]),
(TOPO_CS1_ID,
[DEV_CS1GW1_ID, DEV_CS1GW2_ID],
[]),
(TOPO_CS2_ID,
[DEV_CS2GW1_ID, DEV_CS2GW2_ID],
[]),
(TOPO_TN_ID,
[DEV_TOLS_ID],
[]),
]
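The CONTEXTS/TOPOLOGIES/DEVICES/LINKS containers (and OBJECTS_PER_TOPOLOGY) are consumed by the functional tests shown below. A rough sketch of how such a descriptor set is pushed through the Context and Device clients, using the method names the tests import (treat the loop as illustrative; ordering, error handling and topology re-linking are omitted):

```python
# Rough onboarding loop; context_client and device_client come from the test fixtures
# (see .Fixtures in the test files below). Proto messages are built from the JSON dicts.
from common.proto.context_pb2 import Context, Device, Link, Topology

def onboard(context_client, device_client):
    for context in CONTEXTS: context_client.SetContext(Context(**context))
    for topology in TOPOLOGIES: context_client.SetTopology(Topology(**topology))
    for device in DEVICES: device_client.AddDevice(Device(**device))
    for link in LINKS: context_client.SetLink(Link(**link))
```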
......@@ -25,7 +25,7 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
# if true, Device component is present and will infer the endpoints from connect-rules
# if false, Device component is not present and device objects must contain preconfigured endpoints
ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'False')
ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'True')
ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'}
def compose_router(device_uuid, endpoint_uuids, topology_id=None):
......
......@@ -18,8 +18,9 @@ from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from .Fixtures import context_client, device_client
#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
#from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
from .Objects_DC_CSGW_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
LOGGER = logging.getLogger(__name__)
......
......@@ -19,8 +19,9 @@ from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from .Fixtures import context_client, device_client
#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
#from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
from .Objects_DC_CSGW_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
LOGGER = logging.getLogger(__name__)
......
......@@ -20,10 +20,13 @@ from context.client.ContextClient import ContextClient
from .Fixtures import context_client, osm_wim
#from .Objects_BigNet import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_TN import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN_OLS import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_OLS import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
......
......@@ -26,10 +26,12 @@ from context.client.ContextClient import ContextClient
from .Fixtures import context_client, osm_wim
#from .Objects_BigNet import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_TN import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
#from .Objects_DC_CSGW_TN_OLS import (
# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
from .Objects_DC_CSGW_TN_OLS import (
CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
......
......@@ -2,7 +2,7 @@
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device automation service compute monitoring webui"
export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
......
{"overwrite": true, "folderId": 0, "dashboard":
{
"id": null,
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"iteration": 1664814762635,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "always",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": ".*PACKETS_.*"
},
"properties": [
{
"id": "custom.axisPlacement",
"value": "left"
},
{
"id": "unit",
"value": "pps"
},
{
"id": "custom.axisLabel",
"value": "Packets / sec"
},
{
"id": "custom.axisSoftMin",
"value": 0
}
]
},
{
"matcher": {
"id": "byRegexp",
"options": ".*BYTES_.*"
},
"properties": [
{
"id": "custom.axisPlacement",
"value": "right"
},
{
"id": "unit",
"value": "Bps"
},
{
"id": "custom.axisLabel",
"value": "Bytes / sec"
},
{
"id": "custom.axisSoftMin",
"value": 0
}
]
}
]
},
"gridPos": {
"h": 19,
"w": 24,
"x": 0,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [
"first",
"min",
"mean",
"max",
"lastNotNull"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"format": "time_series",
"group": [],
"hide": false,
"metricColumn": "kpi_value",
"rawQuery": true,
"rawSql": "SELECT\r\n $__time(timestamp), kpi_value AS metric, device_id, endpoint_id, kpi_sample_type\r\nFROM\r\n monitoring\r\nWHERE\r\n $__timeFilter(timestamp) AND device_id IN ($device_id) AND endpoint_id IN ($endpoint_id) AND kpi_sample_type IN ($kpi_sample_type)\r\nGROUP BY\r\n device_id, endpoint_id, kpi_sample_type\r\nORDER BY\r\n timestamp\r\n",
"refId": "A",
"select": [
[
{
"params": [
"kpi_value"
],
"type": "column"
}
]
],
"table": "monitoring",
"timeColumn": "timestamp",
"where": [
{
"name": "",
"params": [
"device_id",
"IN",
"$device_id"
],
"type": "expression"
}
]
}
],
"title": "L3 Monitoring Packets/Bytes Received/Sent",
"transformations": [
{
"id": "renameByRegex",
"options": {
"regex": "metric {device_id=\\\"([^\\\"]+)\\\", endpoint_id=\\\"([^\\\"]+)\\\", kpi_sample_type=\\\"([^\\\"]+)\\\"}",
"renamePattern": "$3 ($1 $2)"
}
}
],
"type": "timeseries"
}
],
"refresh": "5s",
"schemaVersion": 36,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"definition": "SELECT DISTINCT device_id FROM monitoring;",
"hide": 0,
"includeAll": true,
"label": "Device",
"multi": true,
"name": "device_id",
"options": [],
"query": "SELECT DISTINCT device_id FROM monitoring;",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": false,
"text": "All",
"value": "$__all"
},
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"definition": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
"hide": 0,
"includeAll": true,
"label": "EndPoint",
"multi": true,
"name": "endpoint_id",
"options": [],
"query": "SELECT DISTINCT endpoint_id FROM monitoring WHERE device_id IN (${device_id})",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": true,
"text": [
"PACKETS_RECEIVED",
"PACKETS_TRANSMITTED"
],
"value": [
"PACKETS_RECEIVED",
"PACKETS_TRANSMITTED"
]
},
"datasource": {
"type": "postgres",
"uid": "monitoringdb"
},
"definition": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
"hide": 0,
"includeAll": true,
"label": "Kpi Sample Type",
"multi": true,
"name": "kpi_sample_type",
"options": [],
"query": "SELECT DISTINCT kpi_sample_type FROM monitoring;",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-15m",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "L3 Monitoring",
"uid": "tf-l3-monit",
"version": 1,
"weekStart": ""
}
}
......@@ -13,6 +13,7 @@
# limitations under the License.
import copy, json, logging
from typing import Optional
from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
from common.proto.context_pb2 import Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList
from common.tools.grpc.Tools import grpc_message_to_json_string
......@@ -43,9 +44,10 @@ ENTITY_TO_TEXT = {
}
ACTION_TO_TEXT = {
# action => infinitive, past
'add' : ('Add', 'Added'),
'update' : ('Update', 'Updated'),
# action => infinitive, past
'add' : ('Add', 'Added'),
'update' : ('Update', 'Updated'),
'config' : ('Configure', 'Configured'),
}
def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities):
......@@ -94,14 +96,14 @@ def process_descriptors(descriptors):
topology['device_ids'] = []
topology['link_ids'] = []
process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add)
process_descriptor('device', 'add', context_client.SetDevice, Device, devices )
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', context_client.SetService, Service, services )
process_descriptor('context', 'update', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies )
process_descriptor('slice', 'add', context_client.SetSlice, Slice, slices )
process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add)
process_descriptor('device', 'add', context_client.SetDevice, Device, devices )
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', context_client.SetService, Service, services )
process_descriptor('context', 'update', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies )
process_descriptor('slice', 'add', context_client.SetSlice, Slice, slices )
process_descriptor('connection', 'add', context_client.SetConnection, Connection, connections )
context_client.close()
return
......@@ -111,6 +113,28 @@ def process_descriptors(descriptors):
# in normal mode, connections should not be set
assert len(connections) == 0
devices_add = []
devices_config = []
for device in devices:
connect_rules = []
config_rules = []
for config_rule in device.get('device_config', {}).get('config_rules', []):
custom_resource_key : Optional[str] = config_rule.get('custom', {}).get('resource_key')
if custom_resource_key is not None and custom_resource_key.startswith('_connect/'):
connect_rules.append(config_rule)
else:
config_rules.append(config_rule)
if len(connect_rules) > 0:
device_add = copy.deepcopy(device)
device_add['device_endpoints'] = []
device_add['device_config'] = {'config_rules': connect_rules}
devices_add.append(device_add)
if len(config_rules) > 0:
device['device_config'] = {'config_rules': config_rules}
devices_config.append(device)
services_add = []
for service in services:
service_copy = copy.deepcopy(service)
......@@ -132,14 +156,15 @@ def process_descriptors(descriptors):
service_client.connect()
slice_client.connect()
process_descriptor('context', 'add', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies )
process_descriptor('device', 'add', device_client .AddDevice, Device, devices )
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', service_client.CreateService, Service, services_add)
process_descriptor('service', 'update', service_client.UpdateService, Service, services )
process_descriptor('slice', 'add', slice_client.CreateSlice, Slice, slices_add )
process_descriptor('slice', 'update', slice_client.UpdateSlice, Slice, slices )
process_descriptor('context', 'add', context_client.SetContext, Context, contexts )
process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies )
process_descriptor('device', 'add', device_client .AddDevice, Device, devices_add )
process_descriptor('device', 'config', device_client .ConfigureDevice, Device, devices_config)
process_descriptor('link', 'add', context_client.SetLink, Link, links )
process_descriptor('service', 'add', service_client.CreateService, Service, services_add )
process_descriptor('service', 'update', service_client.UpdateService, Service, services )
process_descriptor('slice', 'add', slice_client .CreateSlice, Slice, slices_add )
process_descriptor('slice', 'update', slice_client .UpdateSlice, Slice, slices )
slice_client.close()
service_client.close()
......
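The new onboarding block above splits each device descriptor in two passes: custom config rules whose resource_key starts with "_connect/" go through AddDevice, so the Device component can open the driver and discover endpoints, and the remaining rules are applied afterwards through ConfigureDevice. A hypothetical device entry from a JSON descriptor file that would be split this way (the specific "_connect/..." resource keys and action codes below are illustrative and may differ per driver):

```python
# Hypothetical device entry from a JSON descriptor file. The onboarding code above would
# send the "_connect/..." rules via AddDevice (driver connection + endpoint discovery)
# and the remaining rule via ConfigureDevice. Field values are illustrative.
device_descriptor = {
    'device_id': {'device_uuid': {'uuid': 'R1'}},
    'device_type': 'emu-packet-router',
    'device_drivers': [0],
    'device_endpoints': [],
    'device_config': {'config_rules': [
        {'action': 1, 'custom': {'resource_key': '_connect/address',  'resource_value': '127.0.0.1'}},
        {'action': 1, 'custom': {'resource_key': '_connect/port',     'resource_value': '0'}},
        {'action': 1, 'custom': {'resource_key': '_connect/settings', 'resource_value': '{"endpoints": []}'}},
        {'action': 1, 'custom': {'resource_key': '/interface[eth0]',  'resource_value': '{"enabled": true}'}},
    ]},
}
```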
......@@ -83,9 +83,9 @@
<a class="nav-link" href="{{ url_for('slice.home') }}">Slice</a>
{% endif %}
</li>
<!--<li class="nav-item">
<li class="nav-item">
<a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
</li>-->
</li>
<li class="nav-item">
<a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
......
......@@ -45,6 +45,7 @@
<div class="col-sm-4">
<b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br><br>
<b>Type: </b>{{ device.device_type }}<br><br>
<b>Status: </b> {{ dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br>
<b>Drivers: </b>
<ul>
{% for driver in device.device_drivers %}
......