Compare revisions (tfs/controller)

Changes are shown as if the source revision was being merged into the target revision.

Showing with 891 additions and 119 deletions
......@@ -108,12 +108,15 @@ class EventsDeviceCollector:
if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
if config_rule.WhichOneof('config_rule') != 'custom': continue
str_resource_key = str(config_rule.custom.resource_key)
if not str_resource_key.startswith('/interface['): continue
json_resource_value = json.loads(config_rule.custom.resource_value)
if 'name' not in json_resource_value: continue
if 'enabled' not in json_resource_value: continue
if not json_resource_value['enabled']: continue
enabled_endpoint_names.add(json_resource_value['name'])
if str_resource_key.startswith('/interface[') or str_resource_key.startswith('/endpoints/endpoint['):
json_resource_value = json.loads(config_rule.custom.resource_value)
if 'name' not in json_resource_value: continue
if 'enabled' in json_resource_value:
if not json_resource_value['enabled']: continue
enabled_endpoint_names.add(json_resource_value['name'])
if 'oper-status' in json_resource_value:
if str(json_resource_value['oper-status']).upper() != 'UP': continue
enabled_endpoint_names.add(json_resource_value['name'])
endpoints_monitored = self._device_endpoint_monitored.setdefault(device_uuid, dict())
for endpoint in device.device_endpoints:
......@@ -127,7 +130,10 @@ class EventsDeviceCollector:
endpoint_was_monitored = endpoints_monitored.get(endpoint_uuid, False)
endpoint_is_enabled = (endpoint_name_or_uuid in enabled_endpoint_names)
if not endpoint_was_monitored and endpoint_is_enabled:
if not endpoint_was_monitored and not endpoint_is_enabled:
# endpoint is idle, do nothing
pass
elif not endpoint_was_monitored and endpoint_is_enabled:
# activate
for value in endpoint.kpi_sample_types:
if value == KPISAMPLETYPE_UNKNOWN: continue
......
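The hunk above widens the set of config rules that mark an endpoint as active for monitoring: the resource key may now target either '/interface[...]' or '/endpoints/endpoint[...]', and an endpoint counts as enabled when the rule value carries 'enabled: true' or an 'oper-status' of UP. A minimal, self-contained sketch of that filtering logic; the helper name and the (key, value) input shape are illustrative, not part of the change:

import json
from typing import Iterable, Set, Tuple

def extract_enabled_endpoint_names(custom_rules : Iterable[Tuple[str, str]]) -> Set[str]:
    # custom_rules: (resource_key, resource_value) pairs taken from CONFIGACTION_SET custom rules
    enabled_endpoint_names = set()
    for resource_key, resource_value in custom_rules:
        if not (resource_key.startswith('/interface[') or resource_key.startswith('/endpoints/endpoint[')):
            continue
        value = json.loads(resource_value)
        if 'name' not in value: continue
        if 'enabled' in value:
            if not value['enabled']: continue
            enabled_endpoint_names.add(value['name'])
        if 'oper-status' in value:
            if str(value['oper-status']).upper() != 'UP': continue
            enabled_endpoint_names.add(value['name'])
    return enabled_endpoint_names

# Both rule styles mark their endpoint as enabled:
rules = [
    ('/interface[eth0]',          '{"name": "eth0", "enabled": true}'),
    ('/endpoints/endpoint[eth1]', '{"name": "eth1", "oper-status": "up"}'),
]
assert extract_enabled_endpoint_names(rules) == {'eth0', 'eth1'}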
......@@ -88,6 +88,8 @@ COPY src/slice/__init__.py slice/__init__.py
COPY src/slice/client/. slice/client/
COPY src/qkd_app/__init__.py qkd_app/__init__.py
COPY src/qkd_app/client/. qkd_app/client/
COPY src/vnt_manager/__init__.py vnt_manager/__init__.py
COPY src/vnt_manager/client/. vnt_manager/client/
RUN mkdir -p /var/teraflow/tests/tools
COPY src/tests/tools/mock_osm/. tests/tools/mock_osm/
......
......@@ -25,3 +25,4 @@ git+https://github.com/robshakir/pyangbind.git
pydantic==2.6.3
requests==2.27.1
werkzeug==2.3.7
websockets==12.0
......@@ -20,7 +20,7 @@ from common.proto.nbi_pb2_grpc import NbiServiceServicer
LOGGER = logging.getLogger(__name__)
METRICS_POOL = MetricsPool('Compute', 'RPC')
METRICS_POOL = MetricsPool('NBI', 'RPC')
class NbiServiceServicerImpl(NbiServiceServicer):
def __init__(self):
......
......@@ -31,6 +31,7 @@ from .rest_server.nbi_plugins.ietf_network_slice import register_ietf_nss
from .rest_server.nbi_plugins.ietf_acl import register_ietf_acl
from .rest_server.nbi_plugins.qkd_app import register_qkd_app
from .rest_server.nbi_plugins.tfs_api import register_tfs_api
from .context_subscription import register_context_subscription
terminate = threading.Event()
LOGGER = None
......@@ -80,6 +81,8 @@ def main():
register_tfs_api(rest_server)
rest_server.start()
register_context_subscription()
LOGGER.debug('Configured Resources:')
for resource in rest_server.api.resources:
LOGGER.debug(' - {:s}'.format(str(resource)))
......
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from websockets.sync.server import serve
from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest
from common.Settings import get_setting
from context.client.ContextClient import ContextClient
from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
from common.tools.object_factory.Topology import json_topology_id
from common.tools.object_factory.Context import json_context_id
from common.proto.context_pb2 import ContextId, TopologyId
import json
import os
from vnt_manager.client.VNTManagerClient import VNTManagerClient

JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID)
ADMIN_TOPOLOGY_ID = TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=JSON_ADMIN_CONTEXT_ID))

vnt_manager_client: VNTManagerClient = VNTManagerClient()
context_client: ContextClient = ContextClient()

ALL_HOSTS = "0.0.0.0"
WS_E2E_PORT = int(get_setting('WS_E2E_PORT', default='8762'))

LOGGER = logging.getLogger(__name__)


def register_context_subscription():
    with serve(subcript_to_vnt_manager, ALL_HOSTS, WS_E2E_PORT, logger=LOGGER) as server:
        LOGGER.info("Running subscription server...: {}:{}".format(ALL_HOSTS, str(WS_E2E_PORT)))
        server.serve_forever()
    LOGGER.info("Exiting subscription server...")


def subcript_to_vnt_manager(websocket):
    for message in websocket:
        LOGGER.debug("Message received: {}".format(message))
        message_json = json.loads(message)
        request = VNTSubscriptionRequest()
        request.host = message_json['host']
        request.port = message_json['port']
        LOGGER.debug("Received gRPC from ws: {}".format(request))
        try:
            vntm_reply = vnt_manager_client.VNTSubscript(request)
            LOGGER.debug("Received gRPC from vntm: {}".format(vntm_reply))
        except Exception as e:
            LOGGER.error('Could not subscript to VTNManager: {}'.format(e))
        websocket.send(vntm_reply.subscription)
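register_context_subscription() starts a blocking WebSocket server on WS_E2E_PORT (default 8762); each client is expected to send a JSON message carrying 'host' and 'port', which is forwarded to the VNT Manager as a VNTSubscriptionRequest, and the 'subscription' field of the gRPC reply is sent back over the socket. A hedged sketch of a matching client, assuming the websockets 12.0 sync client API (pinned in the requirements above) and illustrative addresses:

import json
from websockets.sync.client import connect   # websockets==12.0

def request_vnt_subscription(ws_uri : str, host : str, port : str) -> str:
    with connect(ws_uri) as websocket:
        # Announce where the caller's own endpoint can be reached.
        websocket.send(json.dumps({'host': host, 'port': port}))
        # The server replies with the 'subscription' field of the VNT Manager response.
        return websocket.recv()

# Example (hypothetical addresses):
# reply = request_vnt_subscription('ws://nbi-service:8762', '10.0.0.5', '8765')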
......@@ -195,11 +195,17 @@ def process_site(site : Dict, errors : List[Dict]) -> None:
# site_static_routing: (lan-range, lan-prefix-len, lan-tag) => next-hop
site_static_routing : Dict[Tuple[str, str], str] = {}
for rt_proto in site['routing-protocols']['routing-protocol']:
site_routing_protocols : Dict = site.get('routing-protocols', dict())
site_routing_protocol : List = site_routing_protocols.get('routing-protocol', list())
for rt_proto in site_routing_protocol:
if rt_proto['type'] != 'ietf-l3vpn-svc:static':
MSG = 'Site Routing Protocol Type: {:s}'
raise NotImplementedError(MSG.format(str(rt_proto['type'])))
for ipv4_rt in rt_proto['static']['cascaded-lan-prefixes']['ipv4-lan-prefixes']:
rt_proto_static : Dict = rt_proto.get('static', dict())
rt_proto_static_clps : Dict = rt_proto_static.get('cascaded-lan-prefixes', dict())
rt_proto_static_clps_v4 = rt_proto_static_clps.get('ipv4-lan-prefixes', list())
for ipv4_rt in rt_proto_static_clps_v4:
lan_range, lan_prefix = ipv4_rt['lan'].split('/')
lan_prefix = int(lan_prefix)
lan_tag = int(ipv4_rt['lan-tag'].replace('vlan', ''))
......
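The rewritten loop reads each level of the IETF L3VPN site structure with .get() and an empty default, so a site without 'routing-protocols', without a 'static' block, or without 'ipv4-lan-prefixes' simply contributes no static routes instead of raising a KeyError. A self-contained sketch of the same defensive pattern; the function name is illustrative and the 'next-hop' field is assumed from the mapping comment above:

from typing import Dict, List, Tuple

def extract_ipv4_static_routes(site : Dict) -> Dict[Tuple[str, int, int], str]:
    # (lan-range, lan-prefix-len, lan-tag) => next-hop
    routes : Dict[Tuple[str, int, int], str] = {}
    site_routing_protocols : Dict = site.get('routing-protocols', dict())
    for rt_proto in site_routing_protocols.get('routing-protocol', list()):
        if rt_proto['type'] != 'ietf-l3vpn-svc:static':
            raise NotImplementedError('Site Routing Protocol Type: {:s}'.format(str(rt_proto['type'])))
        clps : Dict = rt_proto.get('static', dict()).get('cascaded-lan-prefixes', dict())
        for ipv4_rt in clps.get('ipv4-lan-prefixes', list()):
            lan_range, lan_prefix = ipv4_rt['lan'].split('/')
            lan_tag = int(ipv4_rt['lan-tag'].replace('vlan', ''))
            routes[(lan_range, int(lan_prefix), lan_tag)] = ipv4_rt['next-hop']   # 'next-hop' assumed
    return routes

# A site without routing protocols no longer raises:
assert extract_ipv4_static_routes({'site-id': 'site1'}) == {}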
......@@ -44,7 +44,7 @@ class L3VPN_Service(Resource):
service_ready_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE
service_status = target.service_status.service_status # pylint: disable=no-member
response = jsonify({})
response = jsonify({'service-id': target.service_id.service_uuid.uuid})
response.status_code = HTTP_OK if service_status == service_ready_status else HTTP_GATEWAYTIMEOUT
except Exception as e: # pylint: disable=broad-except
LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(vpn_id)))
......
......@@ -13,27 +13,34 @@
# limitations under the License.
import json
import logging
from flask.json import jsonify
from flask_restful import Resource, request
from werkzeug.exceptions import BadRequest
from common.proto.context_pb2 import Empty
from common.proto.context_pb2 import Empty, LinkTypeEnum
from common.tools.grpc.Tools import grpc_message_to_json
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from service.client.ServiceClient import ServiceClient
from slice.client.SliceClient import SliceClient
from vnt_manager.client.VNTManagerClient import VNTManagerClient
from .Tools import (
format_grpc_to_json, grpc_connection_id, grpc_context, grpc_context_id, grpc_device,
grpc_device_id, grpc_link, grpc_link_id, grpc_policy_rule_id,
grpc_service_id, grpc_service, grpc_slice, grpc_slice_id, grpc_topology, grpc_topology_id
)
LOGGER = logging.getLogger(__name__)
class _Resource(Resource):
def __init__(self) -> None:
super().__init__()
self.context_client = ContextClient()
self.device_client = DeviceClient()
self.service_client = ServiceClient()
self.vntmanager_client = VNTManagerClient()
self.slice_client = SliceClient()
class ContextIds(_Resource):
......@@ -292,9 +299,14 @@ class Link(_Resource):
return format_grpc_to_json(self.context_client.GetLink(grpc_link_id(link_uuid)))
def put(self, link_uuid : str):
link = request.get_json()
if link_uuid != link['link_id']['link_uuid']['uuid']:
link_json = request.get_json()
link = grpc_link(link_json)
virtual_types = {LinkTypeEnum.LINKTYPE_VIRTUAL_COPPER, LinkTypeEnum.LINKTYPE_VIRTUAL_OPTICAL}
if link_uuid != link.link_id.link_uuid.uuid:
raise BadRequest('Mismatching link_uuid')
elif link.link_type in virtual_types:
link = grpc_link(link_json)
return format_grpc_to_json(self.vntmanager_client.SetVirtualLink(link))
return format_grpc_to_json(self.context_client.SetLink(grpc_link(link)))
def delete(self, link_uuid : str):
......
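This hunk changes the Link PUT handler so that virtual links are delegated to the VNT Manager (SetVirtualLink), while all other link types keep going to the Context service (SetLink). A self-contained sketch of the dispatch rule; the enum values are stand-ins for LinkTypeEnum, not the real proto constants:

from enum import Enum

class LinkType(Enum):
    # Stand-ins for context_pb2.LinkTypeEnum values
    COPPER          = 'copper'
    OPTICAL         = 'optical'
    VIRTUAL_COPPER  = 'virtual-copper'
    VIRTUAL_OPTICAL = 'virtual-optical'

VIRTUAL_TYPES = {LinkType.VIRTUAL_COPPER, LinkType.VIRTUAL_OPTICAL}

def dispatch_set_link(link_type : LinkType) -> str:
    # Virtual links are owned by the VNT Manager; everything else is stored in Context.
    return 'vnt_manager.SetVirtualLink' if link_type in VIRTUAL_TYPES else 'context.SetLink'

assert dispatch_set_link(LinkType.VIRTUAL_OPTICAL) == 'vnt_manager.SetVirtualLink'
assert dispatch_set_link(LinkType.COPPER) == 'context.SetLink'

The split presumably keeps Context as the store for physical topology while the VNT Manager owns the lifecycle of the virtual links it creates.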
......@@ -14,6 +14,7 @@
import json, logging, requests, uuid
from typing import Dict, List, Optional, Tuple, Union
from common.DeviceTypes import DeviceTypeEnum
from common.proto.context_pb2 import (
ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum
)
......@@ -251,21 +252,37 @@ class _Algorithm:
]
self.logger.debug('path_hops = {:s}'.format(str(path_hops)))
try:
_device_dict = {k:v[0] for k,v in self.device_dict.items()}
self.logger.debug('self.device_dict = {:s}'.format(str(_device_dict)))
connections = convert_explicit_path_hops_to_connections(
path_hops, self.device_dict, main_service_uuid, main_service_type)
self.logger.debug('EXTRAPOLATED connections = {:s}'.format(str(connections)))
except: # pylint: disable=bare-except
MSG = ' '.join([
'Unable to Extrapolate sub-services and sub-connections.',
'Assuming single-service and single-connection.',
])
self.logger.exception(MSG)
device_types = {v[0]['device_type'] for k,v in self.device_dict.items()}
DEVICES_BASIC_CONNECTION = {
DeviceTypeEnum.DATACENTER.value, DeviceTypeEnum.EMULATED_DATACENTER.value,
DeviceTypeEnum.CLIENT.value, DeviceTypeEnum.EMULATED_CLIENT.value,
DeviceTypeEnum.PACKET_ROUTER.value, DeviceTypeEnum.EMULATED_PACKET_ROUTER.value,
}
self.logger.debug('device_types = {:s}'.format(str(device_types)))
self.logger.debug('DEVICES_BASIC_CONNECTION = {:s}'.format(str(DEVICES_BASIC_CONNECTION)))
is_basic_connection = device_types.issubset(DEVICES_BASIC_CONNECTION)
self.logger.debug('is_basic_connection = {:s}'.format(str(is_basic_connection)))
if is_basic_connection:
self.logger.info('Assuming basic connections...')
connections = convert_explicit_path_hops_to_plain_connection(
path_hops, main_service_uuid, main_service_type)
self.logger.debug('BASIC connections = {:s}'.format(str(connections)))
else:
try:
_device_dict = {k:v[0] for k,v in self.device_dict.items()}
self.logger.debug('self.device_dict = {:s}'.format(str(_device_dict)))
connections = convert_explicit_path_hops_to_connections(
path_hops, self.device_dict, main_service_uuid, main_service_type)
self.logger.debug('EXTRAPOLATED connections = {:s}'.format(str(connections)))
except: # pylint: disable=bare-except
MSG = ' '.join([
'Unable to Extrapolate sub-services and sub-connections.',
'Assuming single-service and single-connection.',
])
self.logger.exception(MSG)
connections = convert_explicit_path_hops_to_plain_connection(
path_hops, main_service_uuid, main_service_type)
self.logger.debug('BASIC connections = {:s}'.format(str(connections)))
for connection in connections:
service_uuid,service_type,path_hops,_ = connection
......
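The new branch above short-circuits sub-service/sub-connection extrapolation when every device along the computed path is of a "basic connection" type (datacenter, client, packet router, plus their emulated variants); in that case a single plain connection is emitted. A self-contained sketch of the decision; the device-type strings are stand-ins for DeviceTypeEnum values:

DEVICES_BASIC_CONNECTION = {
    'datacenter', 'emu-datacenter',
    'client', 'emu-client',
    'packet-router', 'emu-packet-router',
}

def is_basic_connection(device_types) -> bool:
    # True when no device on the path requires extrapolating sub-services/sub-connections.
    return set(device_types).issubset(DEVICES_BASIC_CONNECTION)

# A purely packet/datacenter path stays basic; adding, e.g., an optical device
# falls back to the extrapolation branch.
assert is_basic_connection({'datacenter', 'packet-router'})
assert not is_basic_connection({'packet-router', 'optical-roadm'})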
......@@ -57,6 +57,11 @@ class SettingsHandler:
def get(self, key_or_path : Union[str, List[str]], default : Optional[Any] = None) -> Optional[TreeNode]:
return get_subnode(self.__resolver, self.__config, key_or_path, default=default)
def get_service_settings(self) -> Optional[TreeNode]:
service_settings_uri = '/settings'
service_settings = self.get(service_settings_uri)
return service_settings
def get_device_settings(self, device : Device) -> Optional[TreeNode]:
device_keys = device.device_id.device_uuid.uuid, device.name
......
......@@ -15,14 +15,17 @@
import json, logging
from typing import Any, Dict, List, Optional, Tuple, Union
from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
from common.proto.context_pb2 import ConfigRule, DeviceId, Service
from common.proto.context_pb2 import ConfigRule, ConnectionId, DeviceId, Service
from common.tools.object_factory.Connection import json_connection_id
from common.tools.object_factory.Device import json_device_id
from common.type_checkers.Checkers import chk_type
from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
from service.service.service_handler_api._ServiceHandler import _ServiceHandler
from service.service.service_handler_api.SettingsHandler import SettingsHandler
from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
from service.service.task_scheduler.TaskExecutor import TaskExecutor
from service.service.tools.EndpointIdFormatters import endpointids_to_raw
from .ConfigRuleComposer import ConfigRuleComposer
from .StaticRouteGenerator import StaticRouteGenerator
LOGGER = logging.getLogger(__name__)
......@@ -35,24 +38,35 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
self.__service = service
self.__task_executor = task_executor
self.__settings_handler = SettingsHandler(service.service_config, **settings)
self.__composer = ConfigRuleComposer()
self.__endpoint_map : Dict[Tuple[str, str], str] = dict()
self.__config_rule_composer = ConfigRuleComposer()
self.__static_route_generator = StaticRouteGenerator(self.__config_rule_composer)
self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict()
def _compose_config_rules(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> None:
if len(endpoints) % 2 != 0: raise Exception('Number of endpoints should be even')
service_settings = self.__settings_handler.get_service_settings()
self.__config_rule_composer.configure(self.__service, service_settings)
for endpoint in endpoints:
device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
device_settings = self.__settings_handler.get_device_settings(device_obj)
_device = self.__composer.get_device(device_obj.name)
self.__config_rule_composer.set_device_alias(device_obj.name, device_uuid)
_device = self.__config_rule_composer.get_device(device_obj.name)
_device.configure(device_obj, device_settings)
endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
_device.set_endpoint_alias(endpoint_obj.name, endpoint_uuid)
_endpoint = _device.get_endpoint(endpoint_obj.name)
_endpoint.configure(endpoint_obj, endpoint_settings)
self.__endpoint_map[(device_uuid, endpoint_uuid)] = device_obj.name
self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name)
self.__static_route_generator.compose(endpoints)
LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
def _do_configurations(
self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]],
......@@ -62,7 +76,7 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
results_per_device = dict()
for device_name,json_config_rules in config_rules_per_device.items():
try:
device_obj = self.__composer.get_device(device_name).objekt
device_obj = self.__config_rule_composer.get_device(device_name).objekt
if len(json_config_rules) == 0: continue
del device_obj.device_config.config_rules[:]
for json_config_rule in json_config_rules:
......@@ -78,7 +92,8 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
results = []
for endpoint in endpoints:
device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
device_name = self.__endpoint_map[(device_uuid, endpoint_uuid)]
device_name, _ = self.__endpoint_map[(device_uuid, endpoint_uuid)]
if device_name not in results_per_device: continue
results.append(results_per_device[device_name])
return results
......@@ -88,12 +103,16 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
) -> List[Union[bool, Exception]]:
chk_type('endpoints', endpoints, list)
if len(endpoints) == 0: return []
service_uuid = self.__service.service_id.service_uuid.uuid
#settings = self.__settings_handler.get('/settings')
self._compose_config_rules(endpoints)
network_instance_name = service_uuid.split('-')[0]
config_rules_per_device = self.__composer.get_config_rules(network_instance_name, delete=False)
#service_uuid = self.__service.service_id.service_uuid.uuid
connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid)))
connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids)
self._compose_config_rules(connection_endpoint_ids)
#network_instance_name = service_uuid.split('-')[0]
#config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=False)
config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False)
LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
results = self._do_configurations(config_rules_per_device, endpoints)
LOGGER.debug('results={:s}'.format(str(results)))
return results
@metered_subclass_method(METRICS_POOL)
......@@ -102,12 +121,16 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
) -> List[Union[bool, Exception]]:
chk_type('endpoints', endpoints, list)
if len(endpoints) == 0: return []
service_uuid = self.__service.service_id.service_uuid.uuid
#settings = self.__settings_handler.get('/settings')
self._compose_config_rules(endpoints)
network_instance_name = service_uuid.split('-')[0]
config_rules_per_device = self.__composer.get_config_rules(network_instance_name, delete=True)
#service_uuid = self.__service.service_id.service_uuid.uuid
connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid)))
connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids)
self._compose_config_rules(connection_endpoint_ids)
#network_instance_name = service_uuid.split('-')[0]
#config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=True)
config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True)
LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
results = self._do_configurations(config_rules_per_device, endpoints, delete=True)
LOGGER.debug('results={:s}'.format(str(results)))
return results
@metered_subclass_method(METRICS_POOL)
......
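SetEndpoint and DeleteEndpoint now retrieve the Connection via the TaskExecutor and compose config rules from its path-hop endpoint IDs rather than from the endpoints argument, so transit hops get configured too, and the composer no longer needs a network-instance name derived from the service UUID. The hunk relies on endpointids_to_raw(); a rough, hedged approximation of what it yields, inferred from how the tuples are unpacked later in this change set (not the helper's actual source):

def endpointids_to_raw_sketch(path_hops_endpoint_ids):
    # Approximation: flatten each EndPointId proto into a
    # (device_uuid, endpoint_uuid, topology_uuid) tuple.
    raw = []
    for ep_id in path_hops_endpoint_ids:
        device_uuid   = ep_id.device_id.device_uuid.uuid
        endpoint_uuid = ep_id.endpoint_uuid.uuid
        topology_uuid = ep_id.topology_id.topology_uuid.uuid
        raw.append((device_uuid, endpoint_uuid, topology_uuid))
    return raw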
......@@ -12,8 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, Tuple
from common.DeviceTypes import DeviceTypeEnum
from common.method_wrappers.ServiceExceptions import OperationFailedException
from common.proto.context_pb2 import ConnectionId
from common.proto.context_pb2 import ConnectionId, Device
from common.tools.grpc.Tools import grpc_message_to_json_string
from service.service.service_handler_api.Tools import check_errors_setendpoint
from service.service.task_scheduler.TaskExecutor import TaskExecutor
......@@ -21,6 +23,9 @@ from service.service.tools.EndpointIdFormatters import endpointids_to_raw
from service.service.tools.ObjectKeys import get_connection_key
from ._Task import _Task
if TYPE_CHECKING:
from service.service.service_handler_api._ServiceHandler import _ServiceHandler
KEY_TEMPLATE = 'connection({connection_id:s}):configure'
class Task_ConnectionConfigure(_Task):
......@@ -44,12 +49,24 @@ class Task_ConnectionConfigure(_Task):
service = self._task_executor.get_service(connection.service_id)
service_handler_settings = {}
service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings)
service_handlers : Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]] = \
self._task_executor.get_service_handlers(connection, service, **service_handler_settings)
endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids)
connection_uuid = connection.connection_id.connection_uuid.uuid
results_setendpoint = service_handler.SetEndpoint(endpointids_to_set, connection_uuid=connection_uuid)
errors = check_errors_setendpoint(endpointids_to_set, results_setendpoint)
endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids)
errors = list()
for _, (service_handler, connection_devices) in service_handlers.items():
_endpointids_to_set = [
(device_uuid, endpoint_uuid, topology_uuid)
for device_uuid, endpoint_uuid, topology_uuid in endpointids_to_set
if device_uuid in connection_devices
]
results_setendpoint = service_handler.SetEndpoint(
_endpointids_to_set, connection_uuid=connection_uuid
)
errors.extend(check_errors_setendpoint(endpointids_to_set, results_setendpoint))
if len(errors) > 0:
MSG = 'SetEndpoint for Connection({:s}) from Service({:s})'
str_connection = grpc_message_to_json_string(connection)
......
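With get_service_handlers() a single connection can now be served by several service handlers (e.g., one per device type), and each handler receives only the path-hop endpoint IDs whose device it manages; errors from all handlers are accumulated before raising. A self-contained sketch of that split with illustrative names and data:

from typing import Dict, List, Set, Tuple

RawEndpointId = Tuple[str, str, str]   # (device_uuid, endpoint_uuid, topology_uuid)

def split_endpointids_per_handler(
    endpointids_to_set : List[RawEndpointId],
    devices_per_handler : Dict[str, Set[str]],   # handler name -> device uuids it manages
) -> Dict[str, List[RawEndpointId]]:
    return {
        handler_name: [
            endpoint_id for endpoint_id in endpointids_to_set
            if endpoint_id[0] in connection_devices
        ]
        for handler_name, connection_devices in devices_per_handler.items()
    }

# Example: a path crossing packet (R1, R2) and optical (O1) devices is split so
# each handler configures only its own devices.
endpointids = [('R1', 'eth0', 'admin'), ('O1', 'port1', 'admin'), ('R2', 'eth1', 'admin')]
handlers = {'l3nm': {'R1', 'R2'}, 'optical': {'O1'}}
assert split_endpointids_per_handler(endpointids, handlers) == {
    'l3nm':    [('R1', 'eth0', 'admin'), ('R2', 'eth1', 'admin')],
    'optical': [('O1', 'port1', 'admin')],
}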
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.