Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Target project: tfs/controller
Showing 626 additions and 48 deletions
#!/bin/bash
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-value-writerservice -c server
#!/bin/bash
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"crdb"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
kubectl --namespace $TFS_K8S_NAMESPACE logs cockroachdb-0
@@ -62,8 +62,12 @@ RUN python3 -m pip install -r requirements.txt
# Add component files into working directory
WORKDIR /var/teraflow
COPY src/telemetry/frontend/__init__.py telemetry/frontend/__init__.py
COPY src/telemetry/frontend/client/. telemetry/frontend/client/
COPY src/context/__init__.py context/__init__.py
COPY src/context/client/. context/client/
COPY src/kpi_manager/__init__.py kpi_manager/__init__.py
COPY src/kpi_manager/client/. kpi_manager/client/
COPY src/monitoring/__init__.py monitoring/__init__.py
COPY src/monitoring/client/. monitoring/client/
COPY src/automation/. automation/
...
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc, logging
from common.Constants import ServiceNameEnum
from common.Settings import get_service_host, get_service_port_grpc
from common.proto.policy_pb2 import PolicyRuleService, PolicyRuleState
from common.proto.policy_pb2_grpc import PolicyServiceStub
from common.tools.client.RetryDecorator import retry, delay_exponential
from common.tools.grpc.Tools import grpc_message_to_json_string
from common.proto.openconfig_device_pb2_grpc import OpenConfigServiceStub

LOGGER = logging.getLogger(__name__)
MAX_RETRIES = 15
DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')

class PolicyClient:
    def __init__(self, host=None, port=None):
        if not host: host = get_service_host(ServiceNameEnum.POLICY)
        if not port: port = get_service_port_grpc(ServiceNameEnum.POLICY)
        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
        LOGGER.info('Creating channel to {:s}...'.format(str(self.endpoint)))
        self.channel = None
        self.stub = None
        self.openconfig_stub = None
        self.connect()
        LOGGER.info('Channel created')

    def connect(self):
        self.channel = grpc.insecure_channel(self.endpoint)
        self.stub = PolicyServiceStub(self.channel)
        self.openconfig_stub = OpenConfigServiceStub(self.channel)

    def close(self):
        if self.channel is not None: self.channel.close()
        self.channel = None
        self.stub = None

    @RETRY_DECORATOR
    def PolicyAddService(self, request : PolicyRuleService) -> PolicyRuleState:
        LOGGER.debug('PolicyAddService request: {:s}'.format(grpc_message_to_json_string(request)))
        response = self.stub.PolicyAddService(request)
        LOGGER.debug('PolicyAddService result: {:s}'.format(grpc_message_to_json_string(response)))
        return response
\ No newline at end of file
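A usage sketch for the client above (not part of this diff; the request is left mostly empty for brevity, and the endpoint is resolved from the POLICY service settings/environment variables):

from automation.client.PolicyClient import PolicyClient
from common.proto.policy_pb2 import PolicyRuleService

policy_client = PolicyClient()       # host/port resolved via get_service_host/get_service_port_grpc
policy_rule = PolicyRuleService()    # fill serviceId and policyRuleBasic as done in ZSMCreate below
state = policy_client.PolicyAddService(policy_rule)
policy_client.close()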
@@ -18,16 +18,171 @@ from common.method_wrappers.Decorator import MetricsPool
from common.proto.automation_pb2_grpc import AutomationServiceServicer
from common.proto.automation_pb2 import (ZSMCreateRequest, ZSMService, ZSMServiceID, ZSMServiceState, ZSMCreateUpdate, ZSMServiceStateEnum)
from common.proto.context_pb2 import (ServiceId, ContextId, Uuid, Empty)
from common.proto.telemetry_frontend_pb2 import (Collector, CollectorId)
from common.proto.policy_pb2 import (PolicyRuleList)
from context.client.ContextClient import ContextClient
from automation.client.PolicyClient import PolicyClient
from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
from kpi_manager.client.KpiManagerClient import KpiManagerClient
from common.proto.context_pb2 import (Service)
from common.proto.kpi_manager_pb2 import (KpiId, KpiDescriptor)
from common.proto.policy_pb2 import PolicyRuleService, PolicyRuleState
from common.proto.policy_action_pb2 import PolicyRuleAction, PolicyRuleActionConfig
from common.proto.policy_condition_pb2 import PolicyRuleCondition
from uuid import uuid4
from common.method_wrappers.ServiceExceptions import InvalidArgumentException

LOGGER = logging.getLogger(__name__)
METRICS_POOL = MetricsPool('Automation', 'RPC')

class AutomationServiceServicerImpl(AutomationServiceServicer):

    def __init__(self):
        LOGGER.info('Init AutomationService')

    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
    def ZSMCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext) -> ZSMService:
-       LOGGER.info('NOT IMPLEMENTED ZSMCreate')

        # check that service does not exist
        context_client = ContextClient()
        kpi_manager_client = KpiManagerClient()
        policy_client = PolicyClient()
        telemetry_frontend_client = TelemetryFrontendClient()

        LOGGER.info('Trying to get the service')
        LOGGER.info('request.serviceId.service_uuid.uuid({:s})'.format(str(request.serviceId.service_uuid.uuid)))
        LOGGER.info('request.serviceId.service_uuid({:s})'.format(str(request.serviceId.service_uuid)))
        LOGGER.info('request.serviceId({:s})'.format(str(request.serviceId)))
        LOGGER.info('Request({:s})'.format(str(request)))

        try:
            ####### GET Context #######################
            service: Service = context_client.GetService(request.serviceId)
            LOGGER.info('service({:s})'.format(str(service)))
            ###########################################

            ####### SET Kpi Descriptor LAT ############
            # if len(service.service_constraints) == 0:
            #     raise InvalidArgumentException("argument_name", "argument_value", [])

            # KPI Descriptor
            kpi_descriptor_lat = KpiDescriptor()
            kpi_descriptor_lat.kpi_sample_type = 701  # static 'KPISAMPLETYPE_SERVICE_LATENCY_MS'; service.service_constraints[].sla_latency.e2e_latency_ms
            kpi_descriptor_lat.service_id.service_uuid.uuid = request.serviceId.service_uuid.uuid
            kpi_descriptor_lat.kpi_id.kpi_id.uuid = str(uuid4())

            kpi_id_lat: KpiId = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_lat)
            LOGGER.info('kpi_id_lat({:s})'.format(str(kpi_id_lat)))
            ###########################################

            ####### SET Kpi Descriptor TX #############
            kpi_descriptor_tx = KpiDescriptor()
            kpi_descriptor_tx.kpi_sample_type = 101  # static KPISAMPLETYPE_PACKETS_TRANSMITTED
            kpi_descriptor_tx.service_id.service_uuid.uuid = request.serviceId.service_uuid.uuid
            kpi_descriptor_tx.kpi_id.kpi_id.uuid = str(uuid4())

            kpi_id_tx: KpiId = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_tx)
            LOGGER.info('kpi_id_tx({:s})'.format(str(kpi_id_tx)))
            ###########################################

            ####### SET Kpi Descriptor RX #############
            kpi_descriptor_rx = KpiDescriptor()
            kpi_descriptor_rx.kpi_sample_type = 102  # static KPISAMPLETYPE_PACKETS_RECEIVED
            kpi_descriptor_rx.service_id.service_uuid.uuid = request.serviceId.service_uuid.uuid
            kpi_descriptor_rx.kpi_id.kpi_id.uuid = str(uuid4())

            kpi_id_rx: KpiId = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_rx)
            LOGGER.info('kpi_id_rx({:s})'.format(str(kpi_id_rx)))
            ###########################################

            ####### START Analyzer LAT ################
            # analyzer = Analyzer()
            # analyzer.algorithm_name = ''  # static
            # analyzer.operation_mode = ''
            # analyzer.input_kpi_ids[] = [kpi_id_rx, kpi_id_tx]
            # analyzer.output_kpi_ids[] = [kpi_id_lat]
            #
            # analyzer_id_lat: AnalyzerId = analyzer_client.StartAnalyzer(analyzer)
            # LOGGER.info('analyzer_id_lat({:s})'.format(str(analyzer_id_lat)))
            ###########################################

            ####### SET Policy LAT ####################
            policy_lat = PolicyRuleService()
            policy_lat.serviceId.service_uuid.uuid = request.serviceId.service_uuid.uuid
            policy_lat.serviceId.context_id.context_uuid.uuid = request.serviceId.context_id.context_uuid.uuid

            # PolicyRuleBasic
            policy_lat.policyRuleBasic.priority = 0
            policy_lat.policyRuleBasic.policyRuleId.uuid.uuid = str(uuid4())
            policy_lat.policyRuleBasic.booleanOperator = 2

            # PolicyRuleAction
            policyRuleActionConfig = PolicyRuleActionConfig()
            policyRuleActionConfig.action_key = ""
            policyRuleActionConfig.action_value = ""

            policyRuleAction = PolicyRuleAction()
            policyRuleAction.action = 5
            policyRuleAction.action_config.append(policyRuleActionConfig)
            policy_lat.policyRuleBasic.actionList.append(policyRuleAction)

            # for constraint in service.service_constraints:

            # PolicyRuleCondition
            policyRuleCondition = PolicyRuleCondition()
            policyRuleCondition.kpiId.kpi_id.uuid = kpi_id_lat.kpi_id.uuid
            policyRuleCondition.numericalOperator = 5
            policyRuleCondition.kpiValue.floatVal = 300  # constraint.sla_latency.e2e_latency_ms

            policy_lat.policyRuleBasic.conditionList.append(policyRuleCondition)

            policy_rule_state: PolicyRuleState = policy_client.PolicyAddService(policy_lat)
            LOGGER.info('policy_rule_state({:s})'.format(str(policy_rule_state)))
            ###########################################

            ####### START Collector TX ################
            collect_tx = Collector()
            collect_tx.collector_id.collector_id.uuid = str(uuid4())
            collect_tx.kpi_id.kpi_id.uuid = kpi_id_tx.kpi_id.uuid
            collect_tx.duration_s = 0  # static
            collect_tx.interval_s = 1  # static
            LOGGER.info('Start Collector TX({:s})'.format(str(collect_tx)))

            collect_id_tx: CollectorId = telemetry_frontend_client.StartCollector(collect_tx)
            LOGGER.info('collect_id_tx({:s})'.format(str(collect_id_tx)))
            ###########################################

            ####### START Collector RX ################
            collect_rx = Collector()
            collect_rx.collector_id.collector_id.uuid = str(uuid4())
            collect_rx.kpi_id.kpi_id.uuid = kpi_id_rx.kpi_id.uuid
            collect_rx.duration_s = 0  # static
            collect_rx.interval_s = 1  # static
            LOGGER.info('Start Collector RX({:s})'.format(str(collect_rx)))

            collect_id_rx: CollectorId = telemetry_frontend_client.StartCollector(collect_rx)
            LOGGER.info('collect_id_rx({:s})'.format(str(collect_id_rx)))
            ###########################################

        except grpc.RpcError as e:
            if e.code() != grpc.StatusCode.NOT_FOUND: raise  # pylint: disable=no-member
            LOGGER.exception('Unable to get Service({:s})'.format(str(request)))
            context_client.close()
            kpi_manager_client.close()
            policy_client.close()
            telemetry_frontend_client.close()
            return None

        LOGGER.info('Here is the service')

        context_client.close()
        kpi_manager_client.close()
        policy_client.close()
        telemetry_frontend_client.close()

        return ZSMService()

    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
...
- <!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ <!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
...
- <!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ <!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
...
- <!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ <!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
...
@@ -59,9 +59,13 @@ class ServiceNameEnum(Enum):
    CACHING = 'caching'
    TE = 'te'
    FORECASTER = 'forecaster'
-   E2EORCHESTRATOR = 'e2eorchestrator'
+   E2EORCHESTRATOR = 'e2e-orchestrator'
    OPTICALCONTROLLER = 'opticalcontroller'
    BGPLS = 'bgpls-speaker'
    KPIMANAGER = 'kpi-manager'
    KPIVALUEAPI = 'kpi-value-api'
    KPIVALUEWRITER = 'kpi-value-writer'
    TELEMETRYFRONTEND = 'telemetry-frontend'

    # Used for test and debugging only
    DLT_GATEWAY = 'dltgateway'

@@ -92,6 +96,10 @@ DEFAULT_SERVICE_GRPC_PORTS = {
    ServiceNameEnum.E2EORCHESTRATOR .value : 10050,
    ServiceNameEnum.OPTICALCONTROLLER .value : 10060,
    ServiceNameEnum.BGPLS .value : 20030,
    ServiceNameEnum.KPIMANAGER .value : 30010,
    ServiceNameEnum.KPIVALUEAPI .value : 30020,
    ServiceNameEnum.KPIVALUEWRITER .value : 30030,
    ServiceNameEnum.TELEMETRYFRONTEND .value : 30050,

    # Used for test and debugging only
    ServiceNameEnum.DLT_GATEWAY .value : 50051,
...
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

- import logging, os, time
+ import logging, os, re, time
from typing import Dict, List
from common.Constants import (
    DEFAULT_GRPC_BIND_ADDRESS, DEFAULT_GRPC_GRACE_PERIOD, DEFAULT_GRPC_MAX_WORKERS, DEFAULT_HTTP_BIND_ADDRESS,

@@ -68,7 +68,8 @@ def get_setting(name, **kwargs):
    raise Exception('Setting({:s}) not specified in environment or configuration'.format(str(name)))

def get_env_var_name(service_name : ServiceNameEnum, env_var_group):
-   return ('{:s}SERVICE_{:s}'.format(service_name.value, env_var_group)).upper()
+   service_name = re.sub(r'[^a-zA-Z0-9]', '_', service_name.value)
+   return ('{:s}SERVICE_{:s}'.format(service_name, env_var_group)).upper()

def get_service_host(service_name : ServiceNameEnum):
    envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_HOST)
...
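For reference, the sketch below (not part of the diff; it inlines the updated helper and assumes the ENVVAR_SUFIX_* constants resolve to 'SERVICE_HOST' and 'SERVICE_PORT_GRPC') shows the environment variable names the hyphenated service names added above now map to:

import re

# Stand-alone copy of the updated get_env_var_name() logic, for illustration only.
def get_env_var_name(service_value: str, env_var_group: str) -> str:
    service_value = re.sub(r'[^a-zA-Z0-9]', '_', service_value)  # 'kpi-manager' -> 'kpi_manager'
    return ('{:s}SERVICE_{:s}'.format(service_value, env_var_group)).upper()

print(get_env_var_name('kpi-manager', 'SERVICE_HOST'))              # KPI_MANAGERSERVICE_SERVICE_HOST
print(get_env_var_name('telemetry-frontend', 'SERVICE_PORT_GRPC'))  # TELEMETRY_FRONTENDSERVICE_SERVICE_PORT_GRPC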
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from confluent_kafka import KafkaException
from confluent_kafka.admin import AdminClient, NewTopic

LOGGER = logging.getLogger(__name__)

class KafkaConfig(Enum):
    # SERVER_IP = "127.0.0.1:9092"
    SERVER_IP = "kafka-service.kafka.svc.cluster.local:9092"
    ADMIN_CLIENT = AdminClient({'bootstrap.servers': SERVER_IP})

class KafkaTopic(Enum):
    REQUEST = 'topic_request'
    RESPONSE = 'topic_response'
    RAW = 'topic_raw'
    LABELED = 'topic_labeled'
    VALUE = 'topic_value'

    @staticmethod
    def create_all_topics() -> bool:
        """
        Method to create Kafka topics defined as class members.
        """
        all_topics = [member.value for member in KafkaTopic]
        if KafkaTopic.create_new_topic_if_not_exists(all_topics):
            LOGGER.debug("All topics were created successfully")
            return True
        else:
            LOGGER.debug("Error creating all topics")
            return False

    @staticmethod
    def create_new_topic_if_not_exists(new_topics: list) -> bool:
        """
        Method to create Kafka topics if they do not exist.
        Args:
            new_topics: list containing the topic name(s) to be created on Kafka.
        """
        LOGGER.debug("Topic names to be verified and created: {:}".format(new_topics))
        for topic in new_topics:
            try:
                topic_metadata = KafkaConfig.ADMIN_CLIENT.value.list_topics(timeout=5)
                # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics))
                if topic not in topic_metadata.topics:
                    # If the topic does not exist, create a new topic
                    print("Topic {:} does not exist. Creating...".format(topic))
                    LOGGER.debug("Topic {:} does not exist. Creating...".format(topic))
                    new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
                    KafkaConfig.ADMIN_CLIENT.value.create_topics([new_topic])
                else:
                    print("Topic name already exists: {:}".format(topic))
                    LOGGER.debug("Topic name already exists: {:}".format(topic))
            except Exception as e:
                LOGGER.debug("Failed to create topic: {:}".format(e))
                return False
        return True

# create all topics after the deployments (Telemetry and Analytics)
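As a usage sketch tied to the comment above (the import path is assumed, it is not shown in this diff), topic creation would be invoked once after the Telemetry/Analytics deployments:

# Hypothetical module path; adjust to wherever KafkaConfig/KafkaTopic live in the source tree.
from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

if KafkaTopic.create_all_topics():
    print("Kafka topics ready on broker {:s}".format(KafkaConfig.SERVER_IP.value))
else:
    raise RuntimeError("Could not create one or more Kafka topics")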
@@ -23,7 +23,8 @@ Flask==2.1.3
Flask-HTTPAuth==4.5.0
Flask-RESTful==0.3.9
Jinja2==3.0.3
+ numpy<2.0.0
- ncclient==0.6.13
+ ncclient==0.6.15
p4runtime==1.3.0
pandas==1.5.*
paramiko==2.9.2
...
@@ -20,7 +20,7 @@ from .Tools import add_value_from_tag
LOGGER = logging.getLogger(__name__)

XPATH_ACL_SET = "//ocacl:acl/ocacl:acl-sets/ocacl:acl-set"
- XPATH_A_ACL_ENTRY = ".//ocacl:acl-entries/ocacl:ecl-entry"
+ XPATH_A_ACL_ENTRY = ".//ocacl:acl-entries/ocacl:acl-entry"
XPATH_A_IPv4 = ".//ocacl:ipv4/ocacl:config"
XPATH_A_TRANSPORT = ".//ocacl:transport/ocacl:config"
XPATH_A_ACTIONS = ".//ocacl:actions/ocacl:config"

@@ -34,29 +34,31 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
    response = []
    acl = {}
+   name = {}

    for xml_acl in xml_data.xpath(XPATH_ACL_SET, namespaces=NAMESPACES):
        #LOGGER.info('xml_acl = {:s}'.format(str(ET.tostring(xml_acl))))

        acl_name = xml_acl.find('ocacl:name', namespaces=NAMESPACES)
        if acl_name is None or acl_name.text is None: continue
-       add_value_from_tag(acl, 'name', acl_name)
+       add_value_from_tag(name, 'name', acl_name)

        acl_type = xml_acl.find('ocacl:type', namespaces=NAMESPACES)
        add_value_from_tag(acl, 'type', acl_type)

        for xml_acl_entries in xml_acl.xpath(XPATH_A_ACL_ENTRY, namespaces=NAMESPACES):
-           acl_id = xml_acl_entries.find('ocacl:sequence_id', namespaces=NAMESPACES)
-           add_value_from_tag(acl, 'sequence_id', acl_id)
+           acl_id = xml_acl_entries.find('ocacl:sequence-id', namespaces=NAMESPACES)
+           add_value_from_tag(acl, 'sequence-id', acl_id)
+           LOGGER.info('xml_acl_id = {:s}'.format(str(ET.tostring(acl_id))))

            for xml_ipv4 in xml_acl_entries.xpath(XPATH_A_IPv4, namespaces=NAMESPACES):
-               ipv4_source = xml_ipv4.find('ocacl:source_address', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'source_address' , ipv4_source)
+               ipv4_source = xml_ipv4.find('ocacl:source-address', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'source-address' , ipv4_source)

-               ipv4_destination = xml_ipv4.find('ocacl:destination_address', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'destination_address' , ipv4_destination)
+               ipv4_destination = xml_ipv4.find('ocacl:destination-address', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'destination-address' , ipv4_destination)

                ipv4_protocol = xml_ipv4.find('ocacl:protocol', namespaces=NAMESPACES)
                add_value_from_tag(acl, 'protocol' , ipv4_protocol)

@@ -64,30 +66,30 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
                ipv4_dscp = xml_ipv4.find('ocacl:dscp', namespaces=NAMESPACES)
                add_value_from_tag(acl, 'dscp' , ipv4_dscp)

-               ipv4_hop_limit = xml_ipv4.find('ocacl:hop_limit', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'hop_limit' , ipv4_hop_limit)
+               ipv4_hop_limit = xml_ipv4.find('ocacl:hop-limit', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'hop-limit' , ipv4_hop_limit)

            for xml_transport in xml_acl_entries.xpath(XPATH_A_TRANSPORT, namespaces=NAMESPACES):
-               transport_source = xml_transport.find('ocacl:source_port', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'source_port' ,transport_source)
+               transport_source = xml_transport.find('ocacl:source-port', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'source-port' ,transport_source)

-               transport_destination = xml_transport.find('ocacl:destination_port', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'destination_port' ,transport_destination)
+               transport_destination = xml_transport.find('ocacl:destination-port', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'destination-port' ,transport_destination)

-               transport_tcp_flags = xml_transport.find('ocacl:tcp_flags', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'tcp_flags' ,transport_tcp_flags)
+               transport_tcp_flags = xml_transport.find('ocacl:tcp-flags', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'tcp-flags' ,transport_tcp_flags)

            for xml_action in xml_acl_entries.xpath(XPATH_A_ACTIONS, namespaces=NAMESPACES):
-               action = xml_action.find('ocacl:forwarding_action', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'forwarding_action' ,action)
+               action = xml_action.find('ocacl:forwarding-action', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'forwarding-action' ,action)

-               log_action = xml_action.find('ocacl:log_action', namespaces=NAMESPACES)
-               add_value_from_tag(acl, 'log_action' ,log_action)
+               log_action = xml_action.find('ocacl:log-action', namespaces=NAMESPACES)
+               add_value_from_tag(acl, 'log-action' ,log_action)

            resource_key = '/acl/acl-set[{:s}][{:s}]/acl-entry[{:s}]'.format(
-               acl['name'], acl['type'], acl['sequence-id'])
+               name['name'], acl['type'], acl['sequence-id'])
            response.append((resource_key,acl))

    for xml_interface in xml_data.xpath(XPATH_INTERFACE, namespaces=NAMESPACES):

@@ -99,25 +101,25 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
        for xml_ingress in xml_interface.xpath(XPATH_I_INGRESS, namespaces=NAMESPACES):
-           i_name = xml_ingress.find('ocacl:set_name_ingress', namespaces=NAMESPACES)
-           add_value_from_tag(interface, 'ingress_set_name' , i_name)
+           i_name = xml_ingress.find('ocacl:set-name-ingress', namespaces=NAMESPACES)
+           add_value_from_tag(interface, 'ingress-set-name' , i_name)

-           i_type = xml_ingress.find('ocacl:type_ingress', namespaces=NAMESPACES)
-           add_value_from_tag(interface, 'ingress_type' , i_type)
+           i_type = xml_ingress.find('ocacl:type-ingress', namespaces=NAMESPACES)
+           add_value_from_tag(interface, 'ingress-type' , i_type)

            resource_key = '/acl/interfaces/ingress[{:s}][{:s}]'.format(
-               acl['name'], acl['type'])
+               name['name'], acl['type'])
            response.append((resource_key,interface))

        for xml_egress in xml_interface.xpath(XPATH_I_EGRESS, namespaces=NAMESPACES):
-           e_name = xml_egress.find('ocacl:set_name_egress', namespaces=NAMESPACES)
-           add_value_from_tag(interface, 'egress_set_name' , e_name)
+           e_name = xml_egress.find('ocacl:set-name-egress', namespaces=NAMESPACES)
+           add_value_from_tag(interface, 'egress-set-name' , e_name)

-           e_type = xml_egress.find('ocacl:type_egress', namespaces=NAMESPACES)
-           add_value_from_tag(interface, 'egress_type' , e_type)
+           e_type = xml_egress.find('ocacl:type-egress', namespaces=NAMESPACES)
+           add_value_from_tag(interface, 'egress-type' , e_type)

            resource_key = '/acl/interfaces/egress[{:s}][{:s}]'.format(
-               acl['name'], acl['type'])
+               name['name'], acl['type'])
            response.append((resource_key,interface))

    return response
@@ -75,6 +75,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
        component_location = xml_component.find('ocp:state/ocp:location', namespaces=NAMESPACES)
        if not component_location is None:
            add_value_from_tag(inventory['attributes'], 'location', component_location)

        component_id = xml_component.find('ocp:state/ocp:id', namespaces=NAMESPACES)
        if not component_id is None:
            add_value_from_tag(inventory['attributes'], 'id', component_id)

        component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES)
        if component_type is not None:

@@ -109,7 +113,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
        component_mfg_name = xml_component.find('ocp:state/ocp:mfg-name', namespaces=NAMESPACES)
        if not component_mfg_name is None:
-           add_value_from_tag(inventory['attributes'], 'manufacturer-name', component_mfg_name)
+           add_value_from_tag(inventory['attributes'], 'mfg-name', component_mfg_name)

        component_removable = xml_component.find('ocp:state/ocp:removable', namespaces=NAMESPACES)
        if not component_removable is None:
...
@@ -23,6 +23,8 @@ XPATH_NETWORK_INSTANCES = "//ocni:network-instances/ocni:network-instance"
XPATH_NI_PROTOCOLS = ".//ocni:protocols/ocni:protocol"
XPATH_NI_TABLE_CONNECTS = ".//ocni:table-connections/ocni:table-connection"
XPATH_NI_INTERFACE = ".//ocni:interfaces/ocni:interface"

XPATH_NI_IIP_AP = ".//ocni:inter-instance-policies/ocni:apply-policy"
XPATH_NI_IIP_AP_IMPORT = ".//ocni:config/ocni:import-policy"
XPATH_NI_IIP_AP_EXPORT = ".//ocni:config/ocni:export-policy"

@@ -136,6 +138,21 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
                table_connection['address_family'])
            response.append((resource_key, table_connection))

        for xml_interface in xml_network_instance.xpath(XPATH_NI_INTERFACE, namespaces=NAMESPACES):
            LOGGER.info('xml_interfaces = {:s}'.format(str(ET.tostring(xml_interface))))
            interface = {}

            name_iface = xml_interface.find('ocni:config/ocni:interface', namespaces=NAMESPACES)
            if name_iface is None or name_iface.text is None: continue
            add_value_from_tag(interface, 'name_iface', name_iface)

            name_subiface = xml_interface.find('ocni:config/ocni:subinterface', namespaces=NAMESPACES)
            add_value_from_tag(interface, 'name_subiface', name_subiface)

            resource_key = '/network_instance[{:s}]/interface[{:s}]'.format(
                network_instance['name'], interface['name_iface'])
            response.append((resource_key, interface))

        for xml_iip_ap in xml_network_instance.xpath(XPATH_NI_IIP_AP, namespaces=NAMESPACES):
            #LOGGER.info('xml_iip_ap = {:s}'.format(str(ET.tostring(xml_iip_ap))))
...
@@ -27,6 +27,9 @@ from .NetworkInstances import parse as parse_network_instances
from .RoutingPolicy import parse as parse_routing_policy
from .Acl import parse as parse_acl
from .Inventory import parse as parse_inventory
from .acl.acl_adapter import acl_cr_to_dict
from .acl.acl_adapter_ipinfusion_proprietary import acl_cr_to_dict_ipinfusion_proprietary

LOGGER = logging.getLogger(__name__)

ALL_RESOURCE_KEYS = [

@@ -112,15 +115,32 @@ def compose_config( # template generation
        ]
    elif (message_renderer == "jinja"):
-       templates =[]
-       template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key))
-       templates.append(JINJA_ENV.get_template(template_name))
+       templates = []
        if "acl_ruleset" in resource_key:  # MANAGING ACLs
-           templates =[]
-           templates.append(JINJA_ENV.get_template('acl/acl-set/acl-entry/edit_config.xml'))
-           templates.append(JINJA_ENV.get_template('acl/interfaces/ingress/edit_config.xml'))
-           data : Dict[str, Any] = json.loads(resource_value)
+           if vendor == 'ipinfusion':  # ipinfusion proprietary netconf recipe is used temporarily
+               enable_ingress_filter_path = 'acl/interfaces/ingress/enable_ingress_filter.xml'
+               acl_entry_path = 'acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml'
+               acl_ingress_path = 'acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml'
+               data : Dict[str, Any] = acl_cr_to_dict_ipinfusion_proprietary(resource_value, delete=delete)
+           else:
+               enable_ingress_filter_path = 'acl/interfaces/ingress/enable_ingress_filter.xml'
+               acl_entry_path = 'acl/acl-set/acl-entry/edit_config.xml'
+               acl_ingress_path = 'acl/interfaces/ingress/edit_config.xml'
+               data : Dict[str, Any] = acl_cr_to_dict(resource_value, delete=delete)
+
+           if delete:  # unpair acl and interface before removing acl
+               templates.append(JINJA_ENV.get_template(acl_ingress_path))
+               templates.append(JINJA_ENV.get_template(acl_entry_path))
+               templates.append(JINJA_ENV.get_template(enable_ingress_filter_path))
+           else:
+               templates.append(JINJA_ENV.get_template(enable_ingress_filter_path))
+               templates.append(JINJA_ENV.get_template(acl_entry_path))
+               templates.append(JINJA_ENV.get_template(acl_ingress_path))
+       else:
+           template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key))
+           templates.append(JINJA_ENV.get_template(template_name))
+           data : Dict[str, Any] = json.loads(resource_value)

        operation = 'delete' if delete else 'merge'  # others
        #operation = 'delete' if delete else '' # ipinfusion?
...
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
\ No newline at end of file
<acl xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-acl">
  <acl-sets>
    <acl-set {% if operation == 'delete' %}operation="delete"{% endif %}>
      <name>{{name}}</name>
      {% if type is defined %}<type>{{type}}</type>{% endif %}
      <config>
        <name>{{name}}</name>
        {% if type is defined %}<type>{{type}}</type>{% endif %}
      </config>
      {% if operation != 'delete' %}
      <acl-entries>
        <acl-entry>
          <sequence-id>{{sequence_id}}</sequence-id>
          <config>
            <sequence-id>{{sequence_id}}</sequence-id>
          </config>
          <ipv4>
            <config>
              <source-address>{{source_address}}</source-address>
              <destination-address>{{destination_address}}</destination-address>
              <dscp>{{dscp}}</dscp>
              <protocol-tcp />
              <tcp-source-port>{{source_port}}</tcp-source-port>
              <tcp-destination-port>{{destination_port}}</tcp-destination-port>
              <tcp-flags>{{tcp_flags}}</tcp-flags>
              <forwarding-action>{{forwarding_action}}</forwarding-action>
            </config>
          </ipv4>
        </acl-entry>
      </acl-entries>
      {% endif %}
    </acl-set>
  </acl-sets>
</acl>
\ No newline at end of file
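For context, a minimal rendering sketch for the template above (the template root and the values are illustrative; in the driver the real JINJA_ENV and the data dictionary come from compose_config and the ACL adapters shown elsewhere in this diff):

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'), autoescape=False)  # assumed template root
template = env.get_template('acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml')

data = {
    'name': 'sample-acl', 'type': 'ip', 'sequence_id': 10,
    'source_address': '10.0.0.0/24', 'destination_address': '10.0.1.0/24',
    'dscp': 18, 'source_port': 22, 'destination_port': 80,
    'tcp_flags': 'TCP_SYN', 'forwarding_action': 'permit',
}
print(template.render(**data, operation='merge'))  # operation='delete' emits only the acl-set removal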
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, TypedDict

from ..ACL.ACL_multivendor import RULE_TYPE_MAPPING, FORWARDING_ACTION_MAPPING, LOG_ACTION_MAPPING

class ACLRequestData(TypedDict):
    name: str                # acl-set name
    type: str                # acl-set type
    sequence_id: int         # acl-entry sequence-id
    source_address: str
    destination_address: str
    forwarding_action: str
    id: str                  # interface id
    interface: str
    subinterface: int
    set_name_ingress: str    # ingress-acl-set name
    type_ingress: str        # ingress-acl-set type
    all: bool
    dscp: int
    protocol: int
    tcp_flags: str
    source_port: int
    destination_port: int

def acl_cr_to_dict(acl_cr_dict: Dict, subinterface: int = 0) -> Dict:
    rule_set = acl_cr_dict['rule_set']
    rule_set_entry = rule_set['entries'][0]
    rule_set_entry_match = rule_set_entry['match']
    rule_set_entry_action = rule_set_entry['action']

    name: str = rule_set['name']
    type: str = RULE_TYPE_MAPPING[rule_set["type"]]
    sequence_id = rule_set_entry['sequence_id']
    source_address = rule_set_entry_match['src_address']
    destination_address = rule_set_entry_match['dst_address']
    forwarding_action: str = FORWARDING_ACTION_MAPPING[rule_set_entry_action['forward_action']]
    interface_id = acl_cr_dict['interface']
    interface = interface_id
    set_name_ingress = name
    type_ingress = type

    return ACLRequestData(
        name=name,
        type=type,
        sequence_id=sequence_id,
        source_address=source_address,
        destination_address=destination_address,
        forwarding_action=forwarding_action,
        id=interface_id,
        interface=interface,
        # subinterface=subinterface,
        set_name_ingress=set_name_ingress,
        type_ingress=type_ingress,
        all=True,
        # NOTE: the remaining match fields are currently hard-coded values
        dscp=18,
        protocol=6,
        tcp_flags='TCP_SYN',
        source_port=22,
        destination_port=80
    )
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, TypedDict

RULE_TYPE_MAPPING = {
    'ACLRULETYPE_IPV4' : 'ip',
}

FORWARDING_ACTION_MAPPING = {
    'ACLFORWARDINGACTION_DROP'   : 'deny',
    'ACLFORWARDINGACTION_ACCEPT' : 'permit',
}

class ACLRequestData(TypedDict):
    name: str                # acl-set name
    type: str                # acl-set type
    sequence_id: int         # acl-entry sequence-id
    source_address: str
    destination_address: str
    forwarding_action: str
    interface: str
    dscp: int
    tcp_flags: str
    source_port: int
    destination_port: int

def acl_cr_to_dict_ipinfusion_proprietary(acl_cr_dict: Dict, delete: bool = False) -> Dict:
    rule_set = acl_cr_dict['rule_set']
    name: str = rule_set['name']
    type: str = RULE_TYPE_MAPPING[rule_set["type"]]
    interface = acl_cr_dict['interface'][5:]  # remove preceding `PORT-` characters

    if delete:
        return ACLRequestData(name=name, type=type, interface=interface)

    rule_set_entry = rule_set['entries'][0]
    rule_set_entry_match = rule_set_entry['match']
    rule_set_entry_action = rule_set_entry['action']

    return ACLRequestData(
        name=name,
        type=type,
        sequence_id=rule_set_entry['sequence_id'],
        source_address=rule_set_entry_match['src_address'],
        destination_address=rule_set_entry_match['dst_address'],
        forwarding_action=FORWARDING_ACTION_MAPPING[rule_set_entry_action['forward_action']],
        interface=interface,
        dscp=rule_set_entry_match["dscp"],
        tcp_flags=rule_set_entry_match["flags"],
        source_port=rule_set_entry_match['src_port'],
        destination_port=rule_set_entry_match['dst_port']
    )
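A usage sketch for the adapter above (the input dictionary is illustrative; the keys follow what the function reads, not a confirmed TFS payload):

acl_cr = {
    'interface': 'PORT-eth0',   # the adapter strips the leading 'PORT-'
    'rule_set': {
        'name': 'sample-acl',
        'type': 'ACLRULETYPE_IPV4',
        'entries': [{
            'sequence_id': 10,
            'match': {
                'src_address': '10.0.0.0/24', 'dst_address': '10.0.1.0/24',
                'dscp': 18, 'flags': 'TCP_SYN', 'src_port': 22, 'dst_port': 80,
            },
            'action': {'forward_action': 'ACLFORWARDINGACTION_ACCEPT'},
        }],
    },
}

data = acl_cr_to_dict_ipinfusion_proprietary(acl_cr)                   # full entry for create/merge
data_del = acl_cr_to_dict_ipinfusion_proprietary(acl_cr, delete=True)  # only name, type and interface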