Compare revisions: tfs/controller

Changes are shown as if the source revision was being merged into the target revision.

Showing 764 additions and 89 deletions
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests

QKD_ADDRESS = '10.0.2.10'
QKD_URL = 'http://{:s}/qkd_app/create_qkd_app'.format(QKD_ADDRESS)

# Register the application on the first QKD node.
QKD_REQUEST_1 = {
    'app': {
        'server_app_id': '1',
        'client_app_id': [],
        'app_status': 'ON',
        'local_qkdn_id': '00000001-0000-0000-0000-0000000000',
        'backing_qkdl_id': ['00000003-0002-0000-0000-0000000000']
    }
}
print(requests.post(QKD_URL, json=QKD_REQUEST_1))

# Register the application on the second QKD node, backed by the same QKD link.
QKD_REQUEST_2 = {
    'app': {
        'server_app_id': '1',
        'client_app_id': [],
        'app_status': 'ON',
        'local_qkdn_id': '00000003-0000-0000-0000-0000000000',
        'backing_qkdl_id': ['00000003-0002-0000-0000-0000000000']
    }
}
print(requests.post(QKD_URL, json=QKD_REQUEST_2))
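As written, the two calls above print only the bare `Response` objects. A minimal defensive variant is sketched below; the helper name `post_qkd_app`, the timeout value, and the assumption that the endpoint replies with a JSON body are illustrative, not part of this changeset:

import requests

def post_qkd_app(url: str, payload: dict, timeout: float = 5.0) -> dict:
    # Hypothetical helper: fail loudly on HTTP errors instead of
    # printing the Response object; assumes a JSON reply on success.
    response = requests.post(url, json=payload, timeout=timeout)
    response.raise_for_status()
    return response.json()

result = post_qkd_app(QKD_URL, QKD_REQUEST_1)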
@@ -38,3 +38,4 @@ def test_qkd_driver_timeout_connection(mock_get, qkd_driver):
    mock_get.side_effect = requests.exceptions.Timeout
    qkd_driver.timeout = 0.001  # Simulate a very short timeout
    assert qkd_driver.Connect() is False
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from common.method_wrappers.Decorator import MetricsPool
from common.tools.database.GenericDatabase import Database
from common.method_wrappers.ServiceExceptions import OperationFailedException

LOGGER = logging.getLogger(__name__)
METRICS_POOL = MetricsPool('KpiManager', 'Database')

class KpiDB(Database):
    def __init__(self, model) -> None:
        LOGGER.info('Init KpiManagerService')
        super().__init__(model)

    def select_with_filter(self, model, filter_object):
        """
        Generic method to create filters dynamically based on filter_object attributes.
        params: model:         SQLAlchemy model class to query.
                filter_object: Object that contains filtering criteria as attributes.
        return: SQLAlchemy session, query and Model
        """
        session = self.Session()
        try:
            query = session.query(model)
            # Apply filters based on the filter_object
            if filter_object.kpi_id:
                query = query.filter(model.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))
            if filter_object.kpi_sample_type:
                query = query.filter(model.kpi_sample_type.in_(filter_object.kpi_sample_type))
            if filter_object.device_id:
                query = query.filter(model.device_id.in_([d.device_uuid.uuid for d in filter_object.device_id]))
            if filter_object.endpoint_id:
                query = query.filter(model.endpoint_id.in_([e.endpoint_uuid.uuid for e in filter_object.endpoint_id]))
            if filter_object.service_id:
                query = query.filter(model.service_id.in_([s.service_uuid.uuid for s in filter_object.service_id]))
            if filter_object.slice_id:
                query = query.filter(model.slice_id.in_([s.slice_uuid.uuid for s in filter_object.slice_id]))
            if filter_object.connection_id:
                query = query.filter(model.connection_id.in_([c.connection_uuid.uuid for c in filter_object.connection_id]))
            if filter_object.link_id:
                query = query.filter(model.link_id.in_([l.link_uuid.uuid for l in filter_object.link_id]))
        except Exception as e:
            LOGGER.error(f"Error creating filter of {model.__name__} table. ERROR: {e}")
            raise OperationFailedException("CreateKpiDescriptorFilter", extra_details=["unable to create the filter {:}".format(e)])
        return super().select_with_filter(query, session, model)
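A brief usage sketch of `select_with_filter`, assuming the `KpiDescriptorFilter` protobuf message used by the KPI Manager servicer below and the `Kpi` model added in this MR; the UUID value is a placeholder:

from common.proto.kpi_manager_pb2 import KpiDescriptorFilter
from kpi_manager.database.KpiDB import KpiDB
from kpi_manager.database.KpiModel import Kpi as KpiModel

kpi_db = KpiDB(KpiModel)
_filter = KpiDescriptorFilter()
_filter.kpi_id.add().kpi_id.uuid = '00000000-0000-0000-0000-000000000000'  # placeholder UUID
rows = kpi_db.select_with_filter(KpiModel, _filter)  # delegates to GenericDatabase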
@@ -18,7 +18,8 @@ from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
from common.proto.context_pb2 import Empty
from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceServicer
from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
from kpi_manager.database.Kpi_DB import KpiDB
# from kpi_manager.database.Kpi_DB import KpiDB
from kpi_manager.database.KpiDB import KpiDB
from kpi_manager.database.KpiModel import Kpi as KpiModel
LOGGER = logging.getLogger(__name__)
@@ -27,7 +28,7 @@ METRICS_POOL = MetricsPool('KpiManager', 'NBIgRPC')
class KpiManagerServiceServicerImpl(KpiManagerServiceServicer):
    def __init__(self):
        LOGGER.info('Init KpiManagerService')
        self.kpi_db_obj = KpiDB()
        self.kpi_db_obj = KpiDB(KpiModel)

    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
    def SetKpiDescriptor(self, request: KpiDescriptor, grpc_context: grpc.ServicerContext # type: ignore
......
@@ -16,8 +16,11 @@ import logging, signal, sys, threading
from common.Settings import get_log_level
from .KpiManagerService import KpiManagerService
from kpi_manager.database.KpiModel import Kpi as Model
from common.tools.database.GenericDatabase import Database
terminate = threading.Event()
LOGGER = None

def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
    LOGGER.warning('Terminate signal received')
@@ -35,6 +38,11 @@ def main():
    LOGGER.debug('Starting...')

    # To create DB
    kpiDBobj = Database(Model)
    kpiDBobj.create_database()
    kpiDBobj.create_tables()

    grpc_service = KpiManagerService()
    grpc_service.start()
......
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -14,15 +14,38 @@
import logging
from kpi_manager.database.Kpi_DB import KpiDB
#from common.proto.kpi_manager_pb2 import KpiDescriptorList
#from .test_messages import create_kpi_filter_request
from kpi_manager.database.KpiDB import KpiDB
from kpi_manager.database.KpiModel import Kpi as KpiModel
# from common.tools.database.GenericDatabase import Database
LOGGER = logging.getLogger(__name__)
def test_verify_databases_and_Tables():
    LOGGER.info('>>> test_verify_Tables : START <<< ')
    kpiDBobj = KpiDB()
    kpiDBobj = KpiDB(KpiModel)
    # kpiDBobj.drop_database()
    # kpiDBobj.verify_tables()
    kpiDBobj.create_database()
    kpiDBobj.create_tables()
    kpiDBobj.verify_tables()
# def test_generic_DB_select_method():
# LOGGER.info("--> STARTED-test_generic_DB_select_method")
# kpi_obj = KpiDB()
# _filter = create_kpi_filter_request()
# # response = KpiDescriptorList()
# try:
# kpi_obj.select_with_filter(KpiModel, _filter)
# except Exception as e:
# LOGGER.error('Unable to apply filter on kpi descriptor. {:}'.format(e))
# LOGGER.info("--> FINISHED-test_generic_DB_select_method")
# # try:
# # for row in rows:
# # kpiDescriptor_obj = KpiModel.convert_row_to_KpiDescriptor(row)
# # response.kpi_descriptor_list.append(kpiDescriptor_obj)
# # return response
# # except Exception as e:
# # LOGGER.info('Unable to process filter response {:}'.format(e))
# # assert isinstance(r)
@@ -139,9 +139,9 @@ def test_SelectKpiDescriptor(kpi_manager_client):
    LOGGER.info("Response gRPC message object: {:}".format(response))
    assert isinstance(response, KpiDescriptorList)

def test_set_list_of_KPIs(kpi_manager_client):
    LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
    KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
    # adding KPI
    for kpi in KPIs_TO_SEARCH:
        kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))

# def test_set_list_of_KPIs(kpi_manager_client):
#     LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
#     KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
#     # adding KPI
#     for kpi in KPIs_TO_SEARCH:
#         kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))
@@ -59,6 +59,7 @@ unit_test kpi-value-api:
    - docker pull "bitnami/kafka:latest"
    - >
      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
      --env ALLOW_ANONYMOUS_LOGIN=yes
      bitnami/zookeeper:latest
    - sleep 10 # Wait for Zookeeper to start
    - >
@@ -85,6 +86,8 @@ unit_test kpi-value-api:
  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
  after_script:
    - docker rm -f $IMAGE_NAME
    - docker rm -f kafka
    - docker rm -f zookeeper
    - docker network rm teraflowbridge
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
......
@@ -59,6 +59,7 @@ unit_test kpi-value-writer:
    - docker pull "bitnami/kafka:latest"
    - >
      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
      --env ALLOW_ANONYMOUS_LOGIN=yes
      bitnami/zookeeper:latest
    - sleep 10 # Wait for Zookeeper to start
    - >
@@ -77,6 +78,8 @@ unit_test kpi-value-writer:
      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
    - sleep 5
    - docker ps -a
    - docker logs zookeeper
    - docker logs kafka
    - docker logs $IMAGE_NAME
    - >
      docker exec -i $IMAGE_NAME bash -c
@@ -85,8 +88,8 @@ unit_test kpi-value-writer:
  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
  after_script:
    - docker rm -f $IMAGE_NAME
    - docker rm -f zookeeper
    - docker rm -f kafka
    - docker rm -f zookeeper
    - docker network rm teraflowbridge
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
......
@@ -19,7 +19,7 @@ class NameMappings:
    def __init__(self) -> None:
        self._device_uuid_to_name : Dict[str, str] = dict()
        self._endpoint_uuid_to_name : Dict[Tuple[str, str], str] = dict()

    def store_device_name(self, device : Device) -> None:
        device_uuid = device.device_id.device_uuid.uuid
        device_name = device.name
......
@@ -12,19 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json, logging
import enum, json, logging
import pyangbind.lib.pybindJSON as pybindJSON
from flask import request
from flask.json import jsonify
from flask_restful import Resource
from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
from common.Settings import get_setting
from common.proto.context_pb2 import ContextId, Empty
from common.tools.context_queries.Topology import get_topology_details
from common.tools.object_factory.Context import json_context_id
from context.client.ContextClient import ContextClient
from nbi.service.rest_server.nbi_plugins.tools.Authentication import HTTP_AUTH
from nbi.service.rest_server.nbi_plugins.tools.HttpStatusCodes import HTTP_OK, HTTP_SERVERERROR
from .bindings import ietf_network
from .ComposeNetwork import compose_network
from .ManualFixes import manual_fixes
from .YangHandler import YangHandler
LOGGER = logging.getLogger(__name__)
@@ -33,6 +37,14 @@ TE_TOPOLOGY_NAMES = [
    'providerId-10-clientId-0-topologyId-2'
]

class Renderer(enum.Enum):
    LIBYANG   = 'LIBYANG'
    PYANGBIND = 'PYANGBIND'

DEFAULT_RENDERER = Renderer.LIBYANG
USE_RENDERER = get_setting('IETF_NETWORK_RENDERER', default=DEFAULT_RENDERER.value)
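The renderer is resolved once, so deployments can switch it through the `IETF_NETWORK_RENDERER` environment variable; the NBI unit test further down in this diff pins the PyangBind renderer exactly this way. A minimal sketch:

import os

# Must be set before this module is imported, because USE_RENDERER
# is resolved at import time via get_setting().
os.environ['IETF_NETWORK_RENDERER'] = 'PYANGBIND'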
class Networks(Resource):
    @HTTP_AUTH.login_required
    def get(self):
@@ -40,31 +52,59 @@ class Networks(Resource):
        topology_id = ''
        try:
            context_client = ContextClient()

            if USE_RENDERER == Renderer.PYANGBIND.value:
                #target = get_slice_by_uuid(context_client, vpn_id, rw_copy=True)
                #if target is None:
                #    raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
                ietf_nets = ietf_network()

                topology_details = get_topology_details(
                    context_client, DEFAULT_TOPOLOGY_NAME, context_uuid=DEFAULT_CONTEXT_NAME,
                    #rw_copy=True
                )
                if topology_details is None:
                    MSG = 'Topology({:s}/{:s}) not found'
                    raise Exception(MSG.format(DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME))

                for te_topology_name in TE_TOPOLOGY_NAMES:
                    ietf_net = ietf_nets.networks.network.add(te_topology_name)
                    compose_network(ietf_net, te_topology_name, topology_details)

                # TODO: improve these workarounds to enhance performance
                json_response = json.loads(pybindJSON.dumps(ietf_nets, mode='ietf'))
                # Workaround; pyangbind does not allow to set otn_topology / eth-tran-topology
                manual_fixes(json_response)
            elif USE_RENDERER == Renderer.LIBYANG.value:
                yang_handler = YangHandler()
                json_response = []

                contexts = context_client.ListContexts(Empty()).contexts
                context_names = [context.name for context in contexts]
                LOGGER.info(f'Contexts detected: {context_names}')
                for context_name in context_names:
                    topologies = context_client.ListTopologies(ContextId(**json_context_id(context_name))).topologies
                    topology_names = [topology.name for topology in topologies]
                    LOGGER.info(f'Topologies detected for context {context_name}: {topology_names}')
                    for topology_name in topology_names:
                        topology_details = get_topology_details(context_client, topology_name, context_name)
                        if topology_details is None:
                            raise Exception(f'Topology({context_name}/{topology_name}) not found')
                        network_reply = yang_handler.compose_network(topology_name, topology_details)
                        json_response.append(network_reply)

                yang_handler.destroy()
            else:
                raise Exception('Unsupported Renderer: {:s}'.format(str(USE_RENDERER)))

            response = jsonify(json_response)
            response.status_code = HTTP_OK
        except Exception as e: # pylint: disable=broad-except
            LOGGER.exception('Something went wrong Retrieving Topology({:s})'.format(str(topology_id)))
            response = jsonify({'error': str(e)})
......
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import libyang, logging, os
from typing import Any
from common.proto.context_pb2 import TopologyDetails, Device, Link
from .NameMapping import NameMappings
from context.client.ContextClient import ContextClient
from common.tools.object_factory.Device import json_device_id
from common.proto.context_pb2 import DeviceId

LOGGER = logging.getLogger(__name__)

YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
YANG_MODULES = ['ietf-network', 'ietf-network-topology', 'ietf-l3-unicast-topology']

class YangHandler:
    def __init__(self) -> None:
        self._yang_context = libyang.Context(YANG_DIR)
        for yang_module_name in YANG_MODULES:
            LOGGER.info('Loading module: {:s}'.format(str(yang_module_name)))
            self._yang_context.load_module(yang_module_name).feature_enable_all()

    def compose_network(self, te_topology_name: str, topology_details: TopologyDetails) -> dict:
        networks = self._yang_context.create_data_path('/ietf-network:networks')
        network = networks.create_path(f'network[network-id="{te_topology_name}"]')
        network.create_path('network-id', te_topology_name)

        network_types = network.create_path('network-types')
        network_types.create_path('ietf-l3-unicast-topology:l3-unicast-topology')

        name_mappings = NameMappings()

        for device in topology_details.devices:
            self.compose_node(device, name_mappings, network)

        for link in topology_details.links:
            self.compose_link(link, name_mappings, network)

        return json.loads(networks.print_mem('json'))

    def compose_node(self, dev: Device, name_mappings: NameMappings, network: Any) -> None:
        device_name = dev.name
        name_mappings.store_device_name(dev)

        node = network.create_path(f'node[node-id="{device_name}"]')
        node.create_path('node-id', device_name)
        node_attributes = node.create_path('ietf-l3-unicast-topology:l3-node-attributes')
        node_attributes.create_path('name', device_name)

        context_client = ContextClient()
        device = context_client.GetDevice(DeviceId(**json_device_id(device_name)))
        for endpoint in device.device_endpoints:
            name_mappings.store_endpoint_name(dev, endpoint)

        self._process_device_config(device, node)

    def _process_device_config(self, device: Device, node: Any) -> None:
        for config in device.device_config.config_rules:
            if config.WhichOneof('config_rule') != 'custom' or '/interface[' not in config.custom.resource_key:
                continue

            for endpoint in device.device_endpoints:
                endpoint_name = endpoint.name
                if f'/interface[{endpoint_name}]' in config.custom.resource_key or f'/interface[{endpoint_name}.' in config.custom.resource_key:
                    interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0]
                    self._create_termination_point(node, interface_name, endpoint_name, config.custom.resource_value)

    def _create_termination_point(self, node: Any, interface_name: str, endpoint_name: str, resource_value: str) -> None:
        ip_addresses = self._extract_ip_addresses(json.loads(resource_value))
        if ip_addresses:
            tp = node.create_path(f'ietf-network-topology:termination-point[tp-id="{interface_name}"]')
            tp.create_path('tp-id', interface_name)
            tp_attributes = tp.create_path('ietf-l3-unicast-topology:l3-termination-point-attributes')

            for ip in ip_addresses:
                tp_attributes.create_path('ip-address', ip)
            tp_attributes.create_path('interface-name', endpoint_name)

    @staticmethod
    def _extract_ip_addresses(resource_value: dict) -> list:
        ip_addresses = []
        if 'address_ip' in resource_value:
            ip_addresses.append(resource_value['address_ip'])
        if 'address_ipv6' in resource_value:
            ip_addresses.append(resource_value['address_ipv6'])
        return ip_addresses

    def compose_link(self, link_specs: Link, name_mappings: NameMappings, network: Any) -> None:
        link_name = link_specs.name
        links = network.create_path(f'ietf-network-topology:link[link-id="{link_name}"]')
        links.create_path('link-id', link_name)

        self._create_link_endpoint(links, 'source', link_specs.link_endpoint_ids[0], name_mappings)
        self._create_link_endpoint(links, 'destination', link_specs.link_endpoint_ids[-1], name_mappings)

    def _create_link_endpoint(self, links: Any, endpoint_type: str, endpoint_id: Any, name_mappings: NameMappings) -> None:
        endpoint = links.create_path(endpoint_type)
        if endpoint_type == 'destination': endpoint_type = 'dest'
        endpoint.create_path(f'{endpoint_type}-node', name_mappings.get_device_name(endpoint_id.device_id))
        endpoint.create_path(f'{endpoint_type}-tp', name_mappings.get_endpoint_name(endpoint_id))

    def destroy(self) -> None:
        self._yang_context.destroy()
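A usage sketch for `YangHandler`, wired the same way the `Networks` resource above drives it; treat it as illustrative rather than part of the MR:

from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
from common.tools.context_queries.Topology import get_topology_details
from context.client.ContextClient import ContextClient

context_client = ContextClient()
topology_details = get_topology_details(context_client, DEFAULT_TOPOLOGY_NAME, DEFAULT_CONTEXT_NAME)

yang_handler = YangHandler()
try:
    # Returns a dict holding the ietf-network JSON document for the topology.
    network_reply = yang_handler.compose_network(DEFAULT_TOPOLOGY_NAME, topology_details)
    print(network_reply)
finally:
    yang_handler.destroy()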
module ietf-l3-unicast-topology {
yang-version 1.1;
namespace
"urn:ietf:params:xml:ns:yang:ietf-l3-unicast-topology";
prefix "l3t";
import ietf-network {
prefix "nw";
}
import ietf-network-topology {
prefix "nt";
}
import ietf-inet-types {
prefix "inet";
}
import ietf-routing-types {
prefix "rt-types";
}
organization
"IETF I2RS (Interface to the Routing System) Working Group";
contact
"WG Web: <https://datatracker.ietf.org/wg/i2rs/>
WG List: <mailto:i2rs@ietf.org>
Editor: Alexander Clemm
<mailto:ludwig@clemm.org>
Editor: Jan Medved
<mailto:jmedved@cisco.com>
Editor: Robert Varga
<mailto:robert.varga@pantheon.tech>
Editor: Xufeng Liu
<mailto:xufeng.liu.ietf@gmail.com>
Editor: Nitin Bahadur
<mailto:nitin_bahadur@yahoo.com>
Editor: Hariharan Ananthakrishnan
<mailto:hari@packetdesign.com>";
description
"This module defines a model for Layer 3 Unicast
topologies.
Copyright (c) 2018 IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD License
set forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(https://trustee.ietf.org/license-info).
This version of this YANG module is part of
RFC 8346; see the RFC itself for full legal notices.";
revision "2018-02-26" {
description
"Initial revision.";
reference
"RFC 8346: A YANG Data Model for Layer 3 Topologies";
}
identity flag-identity {
description "Base type for flags";
}
typedef l3-event-type {
type enumeration {
enum "add" {
description
"A Layer 3 node, link, prefix, or termination point has
been added";
}
enum "remove" {
description
"A Layer 3 node, link, prefix, or termination point has
been removed";
}
enum "update" {
description
"A Layer 3 node, link, prefix, or termination point has
been updated";
}
}
description "Layer 3 event type for notifications";
}
typedef prefix-flag-type {
type identityref {
base "flag-identity";
}
description "Prefix flag attributes";
}
typedef node-flag-type {
type identityref {
base "flag-identity";
}
description "Node flag attributes";
}
typedef link-flag-type {
type identityref {
base "flag-identity";
}
description "Link flag attributes";
}
typedef l3-flag-type {
type identityref {
base "flag-identity";
}
description "L3 flag attributes";
}
grouping l3-prefix-attributes {
description
"L3 prefix attributes";
leaf prefix {
type inet:ip-prefix;
description
"IP prefix value";
}
leaf metric {
type uint32;
description
"Prefix metric";
}
leaf-list flag {
type prefix-flag-type;
description
"Prefix flags";
}
}
grouping l3-unicast-topology-type {
description "Identifies the topology type to be L3 Unicast.";
container l3-unicast-topology {
presence "indicates L3 Unicast topology";
description
"The presence of the container node indicates L3 Unicast
topology";
}
}
grouping l3-topology-attributes {
description "Topology scope attributes";
container l3-topology-attributes {
description "Contains topology attributes";
leaf name {
type string;
description
"Name of the topology";
}
leaf-list flag {
type l3-flag-type;
description
"Topology flags";
}
}
}
grouping l3-node-attributes {
description "L3 node scope attributes";
container l3-node-attributes {
description
"Contains node attributes";
leaf name {
type inet:domain-name;
description
"Node name";
}
leaf-list flag {
type node-flag-type;
description
"Node flags";
}
leaf-list router-id {
type rt-types:router-id;
description
"Router-id for the node";
}
list prefix {
key "prefix";
description
"A list of prefixes along with their attributes";
uses l3-prefix-attributes;
}
}
}
grouping l3-link-attributes {
description
"L3 link scope attributes";
container l3-link-attributes {
description
"Contains link attributes";
leaf name {
type string;
description
"Link Name";
}
leaf-list flag {
type link-flag-type;
description
"Link flags";
}
leaf metric1 {
type uint64;
description
"Link Metric 1";
}
leaf metric2 {
type uint64;
description
"Link Metric 2";
}
}
}
grouping l3-termination-point-attributes {
description "L3 termination point scope attributes";
container l3-termination-point-attributes {
description
"Contains termination point attributes";
choice termination-point-type {
description
"Indicates the termination point type";
case ip {
leaf-list ip-address {
type inet:ip-address;
description
"IPv4 or IPv6 address.";
}
}
case unnumbered {
leaf unnumbered-id {
type uint32;
description
"Unnumbered interface identifier.
The identifier will correspond to the ifIndex value
of the interface, i.e., the ifIndex value of the
ifEntry that represents the interface in
implementations where the Interfaces Group MIB
(RFC 2863) is supported.";
reference
"RFC 2863: The Interfaces Group MIB";
}
}
case interface-name {
leaf interface-name {
type string;
description
"Name of the interface. The name can (but does not
have to) correspond to an interface reference of a
containing node's interface, i.e., the path name of a
corresponding interface data node on the containing
node reminiscent of data type interface-ref defined
in RFC 8343. It should be noted that data type
interface-ref of RFC 8343 cannot be used directly,
as this data type is used to reference an interface
in a datastore of a single node in the network, not
to uniquely reference interfaces across a network.";
reference
"RFC 8343: A YANG Data Model for Interface Management";
}
}
}
}
}
augment "/nw:networks/nw:network/nw:network-types" {
description
"Introduces new network type for L3 Unicast topology";
uses l3-unicast-topology-type;
}
augment "/nw:networks/nw:network" {
when "nw:network-types/l3t:l3-unicast-topology" {
description
"Augmentation parameters apply only for networks with
L3 Unicast topology";
}
description
"L3 Unicast for the network as a whole";
uses l3-topology-attributes;
}
augment "/nw:networks/nw:network/nw:node" {
when "../nw:network-types/l3t:l3-unicast-topology" {
description
"Augmentation parameters apply only for networks with
L3 Unicast topology";
}
description
"L3 Unicast node-level attributes ";
uses l3-node-attributes;
}
augment "/nw:networks/nw:network/nt:link" {
when "../nw:network-types/l3t:l3-unicast-topology" {
description
"Augmentation parameters apply only for networks with
L3 Unicast topology";
}
description
"Augments topology link attributes";
uses l3-link-attributes;
}
augment "/nw:networks/nw:network/nw:node/"
+"nt:termination-point" {
when "../../nw:network-types/l3t:l3-unicast-topology" {
description
"Augmentation parameters apply only for networks with
L3 Unicast topology";
}
description "Augments topology termination point configuration";
uses l3-termination-point-attributes;
}
notification l3-node-event {
description
"Notification event for L3 node";
leaf l3-event-type {
type l3-event-type;
description
"Event type";
}
uses nw:node-ref;
uses l3-unicast-topology-type;
uses l3-node-attributes;
}
notification l3-link-event {
description
"Notification event for L3 link";
leaf l3-event-type {
type l3-event-type;
description
"Event type";
}
uses nt:link-ref;
uses l3-unicast-topology-type;
uses l3-link-attributes;
}
notification l3-prefix-event {
description
"Notification event for L3 prefix";
leaf l3-event-type {
type l3-event-type;
description
"Event type";
}
uses nw:node-ref;
uses l3-unicast-topology-type;
container prefix {
description
"Contains L3 prefix attributes";
uses l3-prefix-attributes;
}
}
notification termination-point-event {
description
"Notification event for L3 termination point";
leaf l3-event-type {
type l3-event-type;
description
"Event type";
}
uses nt:tp-ref;
uses l3-unicast-topology-type;
uses l3-termination-point-attributes;
}
}
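The module above is the standard RFC 8346 model consumed by the libyang renderer. A small standalone sketch of loading it and enabling its features, mirroring `YangHandler.__init__` (the `./yang` directory path is an assumption; the directory must also hold the imported modules such as `ietf-inet-types` and `ietf-routing-types`):

import libyang

# Assumed local directory with ietf-l3-unicast-topology and its imports.
ctx = libyang.Context('./yang')
module = ctx.load_module('ietf-l3-unicast-topology')
module.feature_enable_all()
print(module.name())
ctx.destroy()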
@@ -12,14 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import deepdiff, json, logging, operator
import deepdiff, json, logging, operator, os
from typing import Dict
from common.Constants import DEFAULT_CONTEXT_NAME
from common.proto.context_pb2 import ContextId
from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
from common.tools.descriptor.Loader import (
DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
)
from common.tools.object_factory.Context import json_context_id
from context.client.ContextClient import ContextClient
from nbi.service.rest_server import RestServer
# Explicitly state NBI to use PyangBind Renderer for this test
os.environ['IETF_NETWORK_RENDERER'] = 'PYANGBIND'
from .PrepareTestScenario import ( # pylint: disable=unused-import
# be careful, order of symbols is important here!
do_rest_get_request, mock_service, nbi_service_rest, osm_wim, context_client
......
@@ -13,7 +13,7 @@
# limitations under the License.
import os
from common.Settings import get_setting
from common.Settings import get_setting, is_deployed_forecaster
DEFAULT_PATHCOMP_BACKEND_SCHEME = 'http'
DEFAULT_PATHCOMP_BACKEND_HOST = '127.0.0.1'
@@ -44,6 +44,7 @@ SETTING_NAME_ENABLE_FORECASTER = 'ENABLE_FORECASTER'
TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'}

def is_forecaster_enabled() -> bool:
    if not is_deployed_forecaster(): return False
    is_enabled = get_setting(SETTING_NAME_ENABLE_FORECASTER, default=None)
    if is_enabled is None: return False
    str_is_enabled = str(is_enabled).upper()
......
@@ -22,13 +22,10 @@ import static org.etsi.tfs.policy.common.ApplicationProperties.VALIDATED_POLICYRULE_STATE;
import io.smallrye.mutiny.Uni;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import java.util.List;
import org.etsi.tfs.policy.context.ContextService;
import org.etsi.tfs.policy.context.model.ServiceId;
import org.etsi.tfs.policy.exception.ExternalServiceFailureException;
import org.etsi.tfs.policy.monitoring.model.AlarmDescriptor;
import org.etsi.tfs.policy.policy.model.PolicyRule;
import org.etsi.tfs.policy.policy.model.PolicyRuleBasic;
import org.etsi.tfs.policy.policy.model.PolicyRuleService;
import org.etsi.tfs.policy.policy.model.PolicyRuleState;
import org.etsi.tfs.policy.policy.model.PolicyRuleStateEnum;
@@ -38,14 +35,10 @@ import org.etsi.tfs.policy.policy.model.PolicyRuleTypeService;
public class AddPolicyServiceImpl {

    @Inject private CommonPolicyServiceImpl commonPolicyService;
    @Inject private CommonAlarmService commonAlarmService;
    @Inject private ContextService contextService;

    public Uni<PolicyRuleState> constructPolicyStateBasedOnCriteria(
        Boolean isService,
        ServiceId serviceId,
        PolicyRuleService policyRuleService,
        PolicyRuleBasic policyRuleBasic) {
        Boolean isService, ServiceId serviceId, PolicyRuleService policyRuleService) {

        if (!isService) {
            var policyRuleState =
@@ -57,36 +50,20 @@ public class AddPolicyServiceImpl {
        final var policyRuleTypeService = new PolicyRuleTypeService(policyRuleService);
        final var policyRule = new PolicyRule(policyRuleTypeService);
        final var alarmDescriptorList = commonPolicyService.createAlarmDescriptorList(policyRule);

        if (alarmDescriptorList.isEmpty()) {
            var policyRuleState =
                new PolicyRuleState(
                    PolicyRuleStateEnum.POLICY_FAILED,
                    String.format(
                        "Invalid PolicyRuleConditions in PolicyRule with ID: %s",
                        policyRuleBasic.getPolicyRuleId()));
            return Uni.createFrom().item(policyRuleState);
        }

        final String kpiId =
            policyRuleService.getPolicyRuleBasic().getPolicyRuleConditions().get(0).getKpiId();
        commonPolicyService.getKpiPolicyRuleServiceMap().put(kpiId, policyRuleService);

        return setPolicyRuleOnContextAndReturnState(policyRule, policyRuleService, alarmDescriptorList);
        return setPolicyRuleOnContextAndReturnState(policyRule);
    }

    private Uni<PolicyRuleState> setPolicyRuleOnContextAndReturnState(
        PolicyRule policyRule,
        PolicyRuleService policyRuleService,
        List<AlarmDescriptor> alarmDescriptorList) {
    private Uni<PolicyRuleState> setPolicyRuleOnContextAndReturnState(PolicyRule policyRule) {
        return contextService
            .setPolicyRule(policyRule)
            .onFailure()
            .transform(failure -> new ExternalServiceFailureException(failure.getMessage()))
            .onItem()
            .transform(
                policyId -> {
                    commonAlarmService.startMonitoringBasedOnAlarmDescriptors(
                        policyId, policyRuleService, alarmDescriptorList);
                    return VALIDATED_POLICYRULE_STATE;
                });
            .transform(policyId -> VALIDATED_POLICYRULE_STATE);
    }
}
@@ -77,7 +77,8 @@ public class CommonPolicyServiceImpl {
    // TODO: Find a better way to disregard alarms while reconfiguring path
    // Temporary solution for not calling the same rpc more than it's needed
    public static int noAlarms = 0;

    private ConcurrentHashMap<String, PolicyRuleService> kpiPolicyRuleServiceMap =
        new ConcurrentHashMap<>();
    private ConcurrentHashMap<String, PolicyRuleService> alarmPolicyRuleServiceMap =
        new ConcurrentHashMap<>();
    private ConcurrentHashMap<String, PolicyRuleDevice> alarmPolicyRuleDeviceMap =
@@ -89,6 +90,10 @@
        return subscriptionList;
    }

    public ConcurrentHashMap<String, PolicyRuleService> getKpiPolicyRuleServiceMap() {
        return kpiPolicyRuleServiceMap;
    }

    public ConcurrentHashMap<String, PolicyRuleService> getAlarmPolicyRuleServiceMap() {
        return alarmPolicyRuleServiceMap;
    }
@@ -111,6 +116,31 @@
        return Long.valueOf(now).doubleValue();
    }

    public void applyActionServiceBasedOnKpiId(String kpiId) {
        // containsKey() is required here: ConcurrentHashMap.contains() checks values, not keys.
        if (!kpiPolicyRuleServiceMap.containsKey(kpiId)) {
            LOGGER.info("No Policy for KpiId");
            return;
        }

        PolicyRuleService policyRuleService = kpiPolicyRuleServiceMap.get(kpiId);
        PolicyRuleAction policyRuleAction =
            policyRuleService.getPolicyRuleBasic().getPolicyRuleActions().get(0);

        setPolicyRuleServiceToContext(policyRuleService, ACTIVE_POLICYRULE_STATE);

        switch (policyRuleAction.getPolicyRuleActionEnum()) {
            case POLICY_RULE_ACTION_ADD_SERVICE_CONSTRAINT:
                addServiceConstraint(policyRuleService, policyRuleAction);
                break; // without break, execution falls through to the next action and the default
            case POLICY_RULE_ACTION_ADD_SERVICE_CONFIGRULE:
                addServiceConfigRule(policyRuleService, policyRuleAction);
                break;
            case POLICY_RULE_ACTION_RECALCULATE_PATH:
                callRecalculatePathRPC(policyRuleService, policyRuleAction);
                break;
            default:
                LOGGER.errorf(INVALID_MESSAGE, policyRuleAction.getPolicyRuleActionEnum());
                return;
        }
    }

    public void applyActionService(String alarmId) {
        PolicyRuleService policyRuleService = alarmPolicyRuleServiceMap.get(alarmId);
        PolicyRuleAction policyRuleAction =
......
@@ -90,7 +90,7 @@ public class PolicyServiceImpl implements PolicyService {
                .transform(
                    isService ->
                        addPolicyServiceImpl.constructPolicyStateBasedOnCriteria(
                            isService, serviceId, policyRuleService, policyRuleBasic))
                            isService, serviceId, policyRuleService))
                .flatMap(Function.identity());
    }
......
@@ -14,14 +14,13 @@
* limitations under the License.
*/
package org.etsi.tfs.policy.policy;
import static org.etsi.tfs.policy.common.ApplicationProperties.*;
package org.etsi.tfs.policy.policy.kafka;
import io.smallrye.reactive.messaging.annotations.Blocking;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.eclipse.microprofile.reactive.messaging.Incoming;
import org.etsi.tfs.policy.policy.CommonPolicyServiceImpl;
import org.etsi.tfs.policy.policy.model.AlarmTopicDTO;
import org.jboss.logging.Logger;
@@ -40,25 +39,12 @@ public class AlarmListener {
    @Incoming(ALARM_TOPIC)
    @Blocking
    public void receiveAlarm(AlarmTopicDTO alarmTopicDto) {
        logger.infof("Received Alarm for analytic service backend :\n %s", alarmTopicDto.toString());
        if (!alarmTopicDto.getKpiId().isEmpty()) {
            alarmTopicDto
                .getAlarms()
                .forEach(
                    (key, value) -> {
                        if (value) {
                            logger.infof(
                                "**************************Received Alarm!**************************");
                            logger.infof("alarmTopicDto:");
                            logger.info(alarmTopicDto.toString());
                            logger.infof(
                                "Received Alarm for analytic service backend with id:\n %s", key.toString());
                            //
                            // commonPolicyServiceImpl.applyActionService(alarmResponse.getAlarmId());
                        }
                    });
        logger.infof("Received message for analytic service backend :\n %s", alarmTopicDto.toString());
        if (alarmTopicDto.isThresholdRaise() || alarmTopicDto.isThresholdFall()) {
            logger.infof("**************************Received Alarm!**************************");
            logger.infof(
                "Received Alarm for analytic service backend with kpiId: %s", alarmTopicDto.getKpiId());
            commonPolicyServiceImpl.applyActionServiceBasedOnKpiId(alarmTopicDto.getKpiId());
        }
    }
}