Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: tfs/controller
Commits on Source (51), showing with 790 additions and 40 deletions
@@ -54,6 +54,8 @@ include:
- local: '/src/qos_profile/.gitlab-ci.yml'
- local: '/src/vnt_manager/.gitlab-ci.yml'
- local: '/src/e2e_orchestrator/.gitlab-ci.yml'
- local: '/src/ztp_server/.gitlab-ci.yml'
- local: '/src/osm_client/.gitlab-ci.yml'
# This should be last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml'
@@ -19,6 +19,7 @@ package context;
import "google/protobuf/any.proto";
import "acl.proto";
import "ip_link.proto";
import "kpi_sample_types.proto"; import "kpi_sample_types.proto";
service ContextService { service ContextService {
...@@ -78,7 +79,7 @@ service ContextService { ...@@ -78,7 +79,7 @@ service ContextService {
rpc RemoveConnection (ConnectionId ) returns ( Empty ) {} rpc RemoveConnection (ConnectionId ) returns ( Empty ) {}
rpc GetConnectionEvents(Empty ) returns (stream ConnectionEvent ) {} rpc GetConnectionEvents(Empty ) returns (stream ConnectionEvent ) {}
// ------------------------------ Experimental ----------------------------- // ------------------------------ Experimental -----------------------------
rpc GetOpticalConfig (Empty ) returns (OpticalConfigList) {} rpc GetOpticalConfig (Empty ) returns (OpticalConfigList) {}
rpc SetOpticalConfig (OpticalConfig ) returns (OpticalConfigId ) {} rpc SetOpticalConfig (OpticalConfig ) returns (OpticalConfigId ) {}
...@@ -203,7 +204,7 @@ message Component { // Defined previously in this sectio ...@@ -203,7 +204,7 @@ message Component { // Defined previously in this sectio
Uuid component_uuid = 1; Uuid component_uuid = 1;
string name = 2; string name = 2;
string type = 3; string type = 3;
map<string, string> attributes = 4; // dict[attr.name => json.dumps(attr.value)] map<string, string> attributes = 4; // dict[attr.name => json.dumps(attr.value)]
string parent = 5; string parent = 5;
} }
...@@ -331,6 +332,7 @@ enum ServiceTypeEnum { ...@@ -331,6 +332,7 @@ enum ServiceTypeEnum {
SERVICETYPE_L1NM = 8; SERVICETYPE_L1NM = 8;
SERVICETYPE_INT = 9; SERVICETYPE_INT = 9;
SERVICETYPE_ACL = 10; SERVICETYPE_ACL = 10;
SERVICETYPE_IPLINK = 11;
} }
enum ServiceStatusEnum { enum ServiceStatusEnum {
...@@ -544,11 +546,17 @@ message ConfigRule_ACL { ...@@ -544,11 +546,17 @@ message ConfigRule_ACL {
acl.AclRuleSet rule_set = 2; acl.AclRuleSet rule_set = 2;
} }
message ConfigRule_IP_LINK {
EndPointId endpoint_id = 1;
ip_link.IpLinkRuleSet rule_set = 2;
}
message ConfigRule {
ConfigActionEnum action = 1;
oneof config_rule {
ConfigRule_Custom custom = 2;
ConfigRule_ACL acl = 3;
ConfigRule_IP_LINK ip_link = 4;
}
}
@@ -579,7 +587,7 @@ message Location {
oneof location {
string region = 1;
GPS_Position gps_position = 2;
string interface = 3;
string circuit_pack = 4;
}
@@ -711,7 +719,7 @@ message OpticalLinkDetails {
string dst_port = 3;
string local_peer_port = 4;
string remote_peer_port = 5;
bool used = 6;
map<string, int32> c_slots = 7;
map<string, int32> l_slots = 8;
map<string, int32> s_slots = 9;
......
// Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package ip_link;
message IpLinkRuleSet {
string ip = 1;
string mask = 3;
string vlan = 4;
}
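For illustration only, a ConfigRule of the new ip_link kind could be written (in the Python dict form used by the TFS JSON tooling) roughly as follows; the device/endpoint names, the IP/VLAN values, and the numeric value assumed for CONFIGACTION_SET are placeholders, not values taken from this change:
# Hypothetical ip_link ConfigRule; field names follow ConfigRule_IP_LINK and IpLinkRuleSet above.
config_rule_ip_link = {
    'action': 1,  # assumed to correspond to ConfigActionEnum.CONFIGACTION_SET
    'ip_link': {
        'endpoint_id': {
            'device_id'    : {'device_uuid': {'uuid': 'R1'}},    # hypothetical device
            'endpoint_uuid': {'uuid': 'eth-1/0/1'},              # hypothetical endpoint
        },
        'rule_set': {'ip': '10.0.0.1', 'mask': '24', 'vlan': '100'},  # illustrative values
    },
}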
@@ -92,4 +92,14 @@ def json_service_p4_planned(
return json_service(
service_uuid, ServiceTypeEnum.SERVICETYPE_L1NM, context_id=json_context_id(context_uuid),
status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints,
config_rules=config_rules)
def json_service_iplink_planned(
service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
):
return json_service(
service_uuid, ServiceTypeEnum.SERVICETYPE_IPLINK, context_id=json_context_id(context_uuid),
status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints,
config_rules=config_rules)
\ No newline at end of file
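A minimal usage sketch of the new helper (the service UUID and the empty endpoint/constraint lists are placeholders):
# Hypothetical call; endpoint_ids would normally carry the endpoint-id dicts of the two link ends.
iplink_service = json_service_iplink_planned(
    'svc-iplink-01', endpoint_ids=[], constraints=[], config_rules=[])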
@@ -107,6 +107,7 @@ def validate_service_type_enum(message):
'SERVICETYPE_TAPI_CONNECTIVITY_SERVICE',
'SERVICETYPE_TE',
'SERVICETYPE_E2E',
'SERVICETYPE_IPLINK',
'SERVICETYPE_OPTICAL_CONNECTIVITY',
'SERVICETYPE_QKD',
]
@@ -146,6 +147,7 @@ def validate_uuid(message, allow_empty=False):
CONFIG_RULE_TYPES = {
'custom',
'acl',
'ip_link',
}
def validate_config_rule(message):
assert isinstance(message, dict)
......
@@ -71,6 +71,9 @@ def compose_config_rules_data(
_, _, endpoint_uuid = endpoint_get_uuid(config_rule.acl.endpoint_id, allow_random=False)
rule_set_name = config_rule.acl.rule_set.name
configrule_name = '{:s}:{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid, rule_set_name)
elif kind == ConfigRuleKindEnum.IP_LINK:
_, _, endpoint_uuid = endpoint_get_uuid(config_rule.ip_link.endpoint_id, allow_random=False)
configrule_name = '{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid)
else:
MSG = 'Name for ConfigRule({:s}) cannot be inferred '+\
'(device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})'
......
@@ -88,6 +88,8 @@ def service_set(db_engine : Engine, messagebroker : MessageBroker, request : Ser
service_type = grpc_to_enum__service_type(request.service_type)
if service_type is None and request.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
service_type = "OPTICAL_CONNECTIVITY"
if service_type is None and request.service_type == ServiceTypeEnum.SERVICETYPE_IPLINK:
service_type = "IP_LINK"
service_status = grpc_to_enum__service_status(request.service_status.service_status)
......
@@ -21,8 +21,9 @@ from ._Base import _Base
# Enum values should match name of field in ConfigRule message
class ConfigRuleKindEnum(enum.Enum):
CUSTOM = 'custom'
ACL = 'acl'
IP_LINK = 'ip_link'
class DeviceConfigRuleModel(_Base):
__tablename__ = 'device_configrule'
......
@@ -30,6 +30,7 @@ class ORM_ServiceTypeEnum(enum.Enum):
TE = ServiceTypeEnum.SERVICETYPE_TE
E2E = ServiceTypeEnum.SERVICETYPE_E2E
OPTICAL_CONNECTIVITY = ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY
IP_LINK = ServiceTypeEnum.SERVICETYPE_IPLINK
QKD = ServiceTypeEnum.SERVICETYPE_QKD
INT = ServiceTypeEnum.SERVICETYPE_INT
ACL = ServiceTypeEnum.SERVICETYPE_ACL
......
@@ -312,7 +312,13 @@ def compute_rules_to_add_delete(
ACL_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/acl_ruleset[{:s}]'
key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, acl_ruleset_name)
context_config_rules[key_or_path] = grpc_message_to_json(config_rule.acl) # get the resource value of the acl
elif config_rule_kind == 'ip_link':
device_uuid = config_rule.ip_link.endpoint_id.device_id.device_uuid.uuid # get the device name
endpoint_uuid = config_rule.ip_link.endpoint_id.endpoint_uuid.uuid # get the endpoint name
ip_link_ruleset_name = config_rule.ip_link.rule_set.name # get the ip_link name
IP_LINK_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/ip_link_ruleset[{:s}]'
key_or_path = IP_LINK_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, ip_link_ruleset_name)
context_config_rules[key_or_path] = grpc_message_to_json(config_rule.ip_link) # get the resource value of the ip_link
request_config_rules = []
for config_rule in request.device_config.config_rules:
config_rule_kind = config_rule.WhichOneof('config_rule')
@@ -329,6 +335,14 @@ def compute_rules_to_add_delete(
request_config_rules.append((
config_rule.action, key_or_path, grpc_message_to_json(config_rule.acl)
))
elif config_rule_kind == 'ip_link': # resource management of "ip_link" rule
device_uuid = config_rule.ip_link.endpoint_id.device_id.device_uuid.uuid
endpoint_uuid = config_rule.ip_link.endpoint_id.endpoint_uuid.uuid
IP_LINK_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/ip_link_ruleset'
key_or_path = IP_LINK_KEY_TEMPLATE.format(device_uuid, endpoint_uuid)
request_config_rules.append((
config_rule.action, key_or_path, grpc_message_to_json(config_rule.ip_link)
))
resources_to_set : List[Tuple[str, Any]] = [] # key, value
resources_to_delete : List[Tuple[str, Any]] = [] # key, value
......
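As a sketch, the request-side key built above for an ip_link rule would look like the following (device and endpoint names are hypothetical); the context-side variant additionally appends the rule-set name in brackets:
# Hypothetical key produced by compute_rules_to_add_delete for an ip_link rule.
IP_LINK_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/ip_link_ruleset'
key_or_path = IP_LINK_KEY_TEMPLATE.format('R1', 'eth-1/0/1')
# key_or_path == '/device[R1]/endpoint[eth-1/0/1]/ip_link_ruleset'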
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import json
import logging, pytz, re, threading
#import lxml.etree as ET
from typing import Any, List, Tuple, Union
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
@@ -22,12 +22,11 @@ from apscheduler.schedulers.background import BackgroundScheduler
from ncclient.manager import Manager, connect_ssh
from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
from common.tools.client.RetryDecorator import delay_exponential
from common.type_checkers.Checkers import chk_length, chk_string, chk_type
from device.service.driver_api.Exceptions import UnsupportedResourceKeyException
from device.service.driver_api._Driver import _Driver
from .templates import compose_config, cli_compose_config, ufi_interface, cisco_interface
from .templates.VPN.common import seperate_port_config
#from .Tools import xml_pretty_print, xml_to_dict, xml_to_file
from .templates.VPN.roadms import (
create_optical_band, disable_media_channel, delete_optical_band, create_media_channel_v2
)
@@ -36,11 +35,10 @@ from .RetryDecorator import retry
from context.client.ContextClient import ContextClient
from common.proto.context_pb2 import OpticalConfig
from .templates.discovery_tool.transponders import transponder_values_extractor
from .templates.discovery_tool.roadms import roadm_values_extractor
from .templates.discovery_tool.open_roadm import openroadm_values_extractor
from .templates.VPN.openroadm import network_media_channel_handler
DEBUG_MODE = False
logging.getLogger('ncclient.manager').setLevel(logging.DEBUG if DEBUG_MODE else logging.WARNING)
logging.getLogger('ncclient.transport.ssh').setLevel(logging.DEBUG if DEBUG_MODE else logging.WARNING)
@@ -51,9 +49,6 @@ logging.getLogger('monitoring-client').setLevel(logging.INFO if DEBUG_MODE else
RE_GET_ENDPOINT_FROM_INTERFACE_KEY = re.compile(r'.*interface\[([^\]]+)\].*')
RE_GET_ENDPOINT_FROM_INTERFACE_XPATH = re.compile(r".*interface\[oci\:name\='([^\]]+)'\].*")
# Collection of samples through NetConf is very slow and each request collects all the data.
# Populate a cache periodically (when first interface is interrogated).
# Evict data after some seconds, when data is considered as outdated
SAMPLE_EVICTION_SECONDS = 30.0 # seconds
SAMPLE_RESOURCE_KEY = 'interfaces/interface/state/counters'
@@ -200,6 +195,65 @@ def edit_config(
#results[i] = True
results.append(result)
if netconf_handler.vendor == "CISCO":
if "L2VSI" in resources[0][1]:
#Configure by CLI
logger.warning("CLI Configuration")
cli_compose_config(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
for i,resource in enumerate(resources):
results.append(True)
else:
logger.warning("CLI Configuration CISCO INTERFACE")
cisco_interface(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
for i,resource in enumerate(resources):
results.append(True)
elif netconf_handler.vendor == "UFISPACE":
#Configure by CLI
logger.warning("CLI Configuration: {:s}".format(resources))
ufi_interface(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
for i,resource in enumerate(resources):
results.append(True)
else:
for i,resource in enumerate(resources):
str_resource_name = 'resources[#{:d}]'.format(i)
try:
logger.debug('[{:s}] resource = {:s}'.format(str_method, str(resource)))
chk_type(str_resource_name, resource, (list, tuple))
chk_length(str_resource_name, resource, min_length=2, max_length=2)
resource_key,resource_value = resource
chk_string(str_resource_name + '.key', resource_key, allow_empty=False)
str_config_messages = compose_config( # get template for configuration
resource_key, resource_value, delete=delete, vendor=netconf_handler.vendor, message_renderer=netconf_handler.message_renderer)
for str_config_message in str_config_messages: # configuration of the received templates
if str_config_message is None: raise UnsupportedResourceKeyException(resource_key)
logger.debug('[{:s}] str_config_message[{:d}] = {:s}'.format(
str_method, len(str_config_message), str(str_config_message)))
netconf_handler.edit_config( # configure the device
config=str_config_message, target=target, default_operation=default_operation,
test_option=test_option, error_option=error_option, format=format)
if commit_per_rule:
netconf_handler.commit() # configuration commit
if 'table_connections' in resource_key:
time.sleep(5) # CPU usage might exceed critical level after route redistribution, BGP daemon needs time to reload
#results[i] = True
results.append(True)
except Exception as e: # pylint: disable=broad-except
str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting')
msg = '[{:s}] Exception {:s} {:s}: {:s}'
logger.exception(msg.format(str_method, str_operation, str_resource_name, str(resource)))
#results[i] = e # if validation fails, store the exception
results.append(e)
if not commit_per_rule:
try:
netconf_handler.commit()
except Exception as e: # pylint: disable=broad-except
msg = '[{:s}] Exception committing: {:s}'
str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting')
logger.exception(msg.format(str_method, str_operation, str(resources)))
results = [e for _ in resources] # if commit fails, set exception in each resource
return results
......
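For reference, the resources argument handled by edit_config above is a list of (resource_key, resource_value) tuples; a hedged sketch of a single ip_link entry (values are illustrative, and whether the value arrives as a dict or as its JSON serialization depends on the renderer in use):
# Hypothetical resources list as it would reach edit_config() / compose_config().
resources = [(
    '/device[R1]/endpoint[eth-1/0/1]/ip_link_ruleset',
    {
        'endpoint_id': {'endpoint_uuid': {'uuid': 'eth-1/0/1'}},
        'rule_set': {'ip': '10.0.0.1', 'mask': '24', 'vlan': '100'},
    },
)]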
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from yattag import Doc, indent
def ip_link_mgmt(data, vendor, delete):
doc, tag, text = Doc().tagtext()
ID = data['endpoint_id']['endpoint_uuid']['uuid']
DATA = data["rule_set"]
with tag('interfaces', xmlns="http://openconfig.net/yang/interfaces"):
if delete:
with tag('interface' ,'xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete"'):
with tag('name'):text(ID)
else:
with tag('interface'):
with tag('name'):text(ID)
with tag('config'):
with tag('name'):text(ID)
with tag('type', 'xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type"'):text('ianaift:l3ipvlan')
with tag('enabled'):text('true')
with tag('subinterfaces'):
with tag('subinterface'):
if vendor is None or vendor == 'ADVA':
with tag('index'): text('0')
with tag('config'):
with tag('index'): text('0')
if vendor == 'ADVA' and 'vlan' not in data:
with tag('untagged-allowed', 'xmlns="http://www.advaoptical.com/cim/adva-dnos-oc-interfaces"'):text('true')
with tag('vlan', xmlns="http://openconfig.net/yang/vlan"):
with tag('match'):
with tag('single-tagged'):
with tag('config'):
with tag('vlan-id'):text(DATA['vlan'])
with tag('ipv4', xmlns="http://openconfig.net/yang/interfaces/ip"):
with tag('addresses'):
with tag('address'):
with tag('ip'):text(DATA['ip'])
with tag('config'):
with tag('ip'):text(DATA['ip'])
with tag('prefix-length'):text(DATA['mask'])
result = indent(
doc.getvalue(),
indentation = ' '*2,
newline = '\r\n'
)
return result
\ No newline at end of file
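A usage sketch of ip_link_mgmt; the endpoint uuid doubles as the OpenConfig interface name, and every value shown is a placeholder:
# Hypothetical invocation with an ADVA device and delete=False.
xml_body = ip_link_mgmt(
    {'endpoint_id': {'endpoint_uuid': {'uuid': 'eth-1/0/1'}},
     'rule_set': {'ip': '10.0.0.1', 'mask': '24', 'vlan': '100'}},
    'ADVA', False)
# xml_body is an indented <interfaces> subtree that sets type ianaift:l3ipvlan, the
# single-tagged vlan-id 100 match and the IPv4 address 10.0.0.1/24 on subinterface 0.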
@@ -13,12 +13,10 @@
# limitations under the License.
import re,logging
import json
import lxml.etree as ET
from typing import Collection, Dict
from .IP_LINK.IP_LINK_multivendor import ip_link_mgmt
from yattag import Doc, indent
from .VPN.physical import create_optical_channel
def add_value_from_tag(target : Dict, field_name: str, field_value : ET.Element, cast=None) -> None:
if isinstance(field_value,str) or field_value is None or field_value.text is None: return
@@ -49,14 +47,12 @@ def add_value_from_collection(target : Dict, field_name: str, field_value : Coll
# Return:
[dict] Set of templates generated according to the configuration rule
"""
def generate_templates(resource_key: str, resource_value: str, delete: bool, vendor: str) -> str: # template management to be configured
result_templates = []
list_resource_key = resource_key.split("/") # the rule resource key management
if "ip_link" in list_resource_key[1]: # ip_link rules management
result_templates.append(ip_link_mgmt(resource_value, vendor, delete))
data['value']=resource_value
#result_templates.append(create_physical_config(data))
return result_templates
......
@@ -12,3 +12,288 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json, logging, lxml.etree as ET, re
import time
from typing import Any, Dict, Optional
from jinja2 import Environment, PackageLoader, select_autoescape
import paramiko
from .Tools import generate_templates
LOGGER = logging.getLogger(__name__)
RE_REMOVE_FILTERS = re.compile(r'\[[^\]]+\]')
RE_REMOVE_FILTERS_2 = re.compile(r'\/[a-z]+:')
EMPTY_CONFIG = '<config></config>'
EMPTY_FILTER = '<filter></filter>'
JINJA_ENV = Environment(loader=PackageLoader('device.service.drivers.openconfig'), autoescape=select_autoescape())
"""
# Method Name: compose_config
# Parameters:
- resource_key: [str] Variable to identify the rule to be executed.
- resource_value: [str] Variable with the configuration parameters of the rule to be executed.
- delete: [bool] Variable to identify whether to create or delete the rule.
- vendor: [str] Variable to identify the vendor of the equipment to be configured.
- message_renderer [str] Variable to identify the template generation method. Can be "jinja" or "pyangbind".
# Functionality:
This method obtains the equipment configuration template according to the value of the variable "message_renderer".
Depending on the value of this variable, it gets the template with "jinja" or "pyangbind".
# Return:
[dict] Set of templates obtained according to the configuration method
"""
def compose_config( # template generation
resource_key : str, resource_value : str, delete : bool = False, vendor : Optional[str] = None, message_renderer = str
) -> str:
if (message_renderer == "pyangbind"):
templates = (generate_templates(resource_key, resource_value, delete, vendor))
return [
'<config>{:s}</config>'.format(template) # format correction
for template in templates
]
elif (message_renderer == "jinja"):
templates = []
template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key))
templates.append(JINJA_ENV.get_template(template_name))
data : Dict[str, Any] = json.loads(resource_value)
operation = 'delete' if delete else 'merge' # others
#operation = 'delete' if delete else '' # ipinfusion?
return [
'<config>{:s}</config>'.format(
template.render(**data, operation=operation, vendor=vendor).strip())
for template in templates
]
else:
raise ValueError('Invalid message_renderer value: {}'.format(message_renderer))
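A hedged usage sketch of compose_config; the resource key and payload variable are placeholders, and the renderer string selects between the two branches above:
# Hypothetical call using the "pyangbind" branch, which delegates to generate_templates();
# the "jinja" branch would instead render the package template named after the resource key.
messages = compose_config(
    resource_key='/ip_link_ruleset[eth-1/0/1]',   # hypothetical key; must match what generate_templates() expects
    resource_value=ip_link_payload,               # hypothetical payload variable (see the ip_link examples above)
    delete=False, vendor='ADVA', message_renderer='pyangbind')
# each returned entry is wrapped as '<config>...</config>' and later pushed with netconf_handler.edit_config()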
"""
# Method Name: cli_compose_config
# Parameters:
- resources: [list] List of (resource_key, resource_value) tuples with the configuration parameters of the rules to be executed.
- delete: [bool] Variable to identify whether to create or delete the rules.
- host: [str] Management IP address of the device to be configured.
- user: [str] Username for the SSH session.
- passw: [str] Password for the SSH session.
# Functionality:
This method configures an L2VPN VPWS service directly through the CLI of a Cisco device:
it opens an SSH shell with paramiko and sends the configuration commands one by one.
# Return:
None; the CLI output of the device is written to the log.
"""
def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str): # Method used for configuring L2VPN VPWS directly via CLI on Cisco devices
key_value_data = {}
for path, json_str in resources:
key_value_data[path] = json_str
# Iterate through the resources and extract parameter values dynamically
for path, json_str in resources:
data = json.loads(json_str)
if 'VC_ID' in data: vc_id = data['VC_ID']
if 'connection_point' in data: connection_point = data['connection_point']
if 'remote_system' in data: remote_system = data['remote_system']
if 'interface' in data:
interface = data['interface']
interface = interface.split("-") #New Line To Avoid Bad Endpoint Name In CISCO
interface = interface[1]
if 'vlan_id' in data: vlan_id = data['vlan_id']
if 'name' in data: ni_name = data['name']
if 'type' in data: ni_type = data['type']
if 'index' in data: subif_index = data['index']
if 'description' in data: description = data['description']
else: description = " "
# initialize the SSH client
ssh_client = paramiko.SSHClient()
ssh_client.load_system_host_keys()
# add to known hosts
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh_client.connect(hostname=host, username=user, password=passw, look_for_keys=False)
#print("Connection successful")
LOGGER.warning("Connection successful")
except:
#print("[!] Cannot connect to the SSH Server")
LOGGER.warning("[!] Cannot connect to the SSH Server")
exit()
try:
# Open an SSH shell
channel = ssh_client.invoke_shell()
channel.send('enable\n')
time.sleep(1)
channel.send('conf term\n')
time.sleep(0.1)
channel.send(f"interface {interface} l2transport\n")
time.sleep(0.1)
channel.send('description l2vpn_vpws_example\n')
time.sleep(0.1)
channel.send(f"encapsulation dot1q {vlan_id}\n")
time.sleep(0.1)
channel.send('mtu 9088\n')
time.sleep(0.1)
channel.send('commit\n')
time.sleep(0.1)
channel.send('l2vpn\n')
time.sleep(0.1)
channel.send('load-balancing flow src-dst-ip\n')
time.sleep(0.1)
channel.send('pw-class l2vpn_vpws_profile_example\n')
time.sleep(0.1)
channel.send('encapsulation mpls\n')
time.sleep(0.1)
channel.send('transport-mode vlan passthrough\n')
time.sleep(0.1)
channel.send('control-word\n')
time.sleep(0.1)
channel.send('exit\n')
time.sleep(0.1)
channel.send('l2vpn\n')
time.sleep(0.1)
channel.send('xconnect group l2vpn_vpws_group_example\n')
time.sleep(0.1)
channel.send(f"p2p {ni_name}\n")
time.sleep(0.1)
channel.send(f"interface {interface}\n") #Ignore the VlanID because the interface already includes the vlanid tag
time.sleep(0.1)
channel.send(f"neighbor ipv4 {remote_system} pw-id {vc_id}\n")
time.sleep(0.1)
channel.send('pw-class l2vpn_vpws_profile_example\n')
time.sleep(0.1)
channel.send('exit\n')
time.sleep(0.1)
channel.send(f"description {description}\n")
time.sleep(0.1)
channel.send('commit\n')
time.sleep(0.1)
# Capture the command output
output = channel.recv(65535).decode('utf-8')
#print(output)
LOGGER.warning(output)
# Close the SSH shell
channel.close()
except Exception as e:
LOGGER.exception(f"Error with the CLI configuration: {e}")
# Close the SSH client
ssh_client.close()
def ufi_interface(resources, delete: bool, host: str, user: str, passw: str): # Method used for configuring interfaces directly via CLI on UfiSpace devices
key_value_data = {}
for path, json_str in resources:
key_value_data[path] = json_str
# initialize the SSH client
ssh_client = paramiko.SSHClient()
ssh_client.load_system_host_keys()
# add to known hosts
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh_client.connect(hostname=host, username=user, password=passw, look_for_keys=False)
LOGGER.warning("Connection successful")
except:
LOGGER.warning("[!] Cannot connect to the SSH Server")
exit()
interface = 'ge100-0/0/3/1'
ip = '1.1.1.1'
mask = '24'
vlan = '1212'
try:
# Open an SSH shell
channel = ssh_client.invoke_shell()
time.sleep(5)
channel.send('config\n')
time.sleep(1)
channel.send(f'interfaces {interface} \n')
time.sleep(1)
channel.send('admin-state enabled \n')
time.sleep(1)
channel.send(f'ipv4-address {ip}/{mask} \n')
time.sleep(1)
channel.send(f'vlan-id {vlan} \n')
time.sleep(1)
channel.send('commit\n')
time.sleep(1)
output = channel.recv(65535).decode('utf-8')
LOGGER.warning(output)
# Close the SSH shell
channel.close()
except Exception as e:
LOGGER.exception(f"Error with the CLI configuration: {e}")
# Close the SSH client
ssh_client.close()
def cisco_interface(resources, delete: bool, host: str, user: str, passw: str): # Method used for configuring interfaces directly via CLI on Cisco devices
key_value_data = {}
for path, json_str in resources:
key_value_data[path] = json_str
# initialize the SSH client
ssh_client = paramiko.SSHClient()
ssh_client.load_system_host_keys()
# add to known hosts
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh_client.connect(hostname=host, username=user, password=passw, look_for_keys=False)
LOGGER.warning("Connection successful")
except:
LOGGER.warning("[!] Cannot connect to the SSH Server")
exit()
interface = 'FourHundredGigE0/0/0/10.1212'
ip = '1.1.1.1'
mask = '24'
vlan = '1212'
try:
# Open an SSH shell
channel = ssh_client.invoke_shell()
time.sleep(1)
channel.send('config\n')
time.sleep(0.1)
channel.send(f'interface {interface} \n')
time.sleep(0.1)
channel.send('no shutdown\n')
time.sleep(0.1)
channel.send(f'ipv4 address {ip}/{mask} \n')
time.sleep(0.1)
channel.send(f'encapsulation dot1q {vlan} \n')
time.sleep(0.1)
channel.send('commit\n')
time.sleep(0.1)
output = channel.recv(65535).decode('utf-8')
LOGGER.warning(output)
# Close the SSH shell
channel.close()
except Exception as e:
LOGGER.exception(f"Error with the CLI configuration: {e}")
# Close the SSH client
ssh_client.close()
@@ -30,7 +30,7 @@ from device.service.driver_api.Exceptions import UnsupportedResourceKeyException
from device.service.driver_api._Driver import _Driver
from device.service.driver_api.AnyTreeTools import TreeNode, get_subnode, set_subnode_value #dump_subtree
#from .Tools import xml_pretty_print, xml_to_dict, xml_to_file
from .templates import ALL_RESOURCE_KEYS, EMPTY_CONFIG, compose_config, get_filter, parse, cli_compose_config, ufi_interface, cisco_interface
from .RetryDecorator import retry
DEBUG_MODE = False
@@ -212,10 +212,22 @@ def edit_config(
):
str_method = 'DeleteConfig' if delete else 'SetConfig'
results = []
if netconf_handler.vendor == "CISCO":
if "L2VSI" in resources[0][1]:
#Configure by CLI
logger.warning("CLI Configuration")
cli_compose_config(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
for i,resource in enumerate(resources):
results.append(True)
else:
logger.warning("CLI Configuration CISCO INTERFACE")
cisco_interface(resources, delete=delete, host= netconf_handler._NetconfSessionHandler__address, user=netconf_handler._NetconfSessionHandler__username, passw=netconf_handler._NetconfSessionHandler__password)
for i,resource in enumerate(resources):
results.append(True)
elif netconf_handler.vendor == "UFISPACE":
#Configure by CLI
logger.warning("CLI Configuration: {:s}".format(str(resources)))
ufi_interface(resources, delete=delete)
for i,resource in enumerate(resources):
results.append(True)
else:
......
# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from yattag import Doc, indent
def ip_link_mgmt(data, vendor, delete):
doc, tag, text = Doc().tagtext()
ID = data['endpoint_id']['endpoint_uuid']['uuid']
DATA = data["rule_set"]
with tag('interfaces', xmlns="http://openconfig.net/yang/interfaces"):
if delete:
with tag('interface' ,'xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete"'):
with tag('name'):text(ID)
else:
with tag('interface'):
with tag('name'):text(ID)
with tag('config'):
with tag('name'):text(ID)
with tag('type', 'xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type"'):text('ianaift:l3ipvlan')
with tag('enabled'):text('true')
with tag('subinterfaces'):
with tag('subinterface'):
if vendor is None or vendor == 'ADVA':
with tag('index'): text('0')
with tag('config'):
with tag('index'): text('0')
if vendor == 'ADVA' and 'vlan' not in data:
with tag('untagged-allowed', 'xmlns="http://www.advaoptical.com/cim/adva-dnos-oc-interfaces"'):text('true')
with tag('vlan', xmlns="http://openconfig.net/yang/vlan"):
with tag('match'):
with tag('single-tagged'):
with tag('config'):
with tag('vlan-id'):text(DATA['vlan'])
with tag('ipv4', xmlns="http://openconfig.net/yang/interfaces/ip"):
with tag('addresses'):
with tag('address'):
with tag('ip'):text(DATA['ip'])
with tag('config'):
with tag('ip'):text(DATA['ip'])
with tag('prefix-length'):text(DATA['mask'])
result = indent(
doc.getvalue(),
indentation = ' '*2,
newline = '\r\n'
)
return result
\ No newline at end of file
@@ -15,9 +15,10 @@
import json
import lxml.etree as ET
from typing import Collection, Dict, Any
from .ACL.ACL_multivendor import acl_mgmt
from .IP_LINK.IP_LINK_multivendor import ip_link_mgmt
from .VPN.Network_instance_multivendor import create_NI, associate_virtual_circuit, associate_RP_to_NI, add_protocol_NI, create_table_conns, associate_If_to_NI
from .VPN.Interfaces_multivendor import create_If_SubIf
from .VPN.Routing_policy import create_rp_def, create_rp_statement
def add_value_from_tag(target : Dict, field_name: str, field_value : ET.Element, cast=None) -> None:
@@ -70,7 +71,7 @@ def generate_templates(resource_key: str, resource_value: str, delete: bool,vend
else:
result_templates.append(create_NI(data,vendor,delete))
elif "interface" in list_resource_key[1]: # interface rules management
data: Dict[str, Any] = json.loads(resource_value)
#data['DEL'] = delete
if "subinterface" in resource_key:
@@ -83,8 +84,10 @@ def generate_templates(resource_key: str, resource_value: str, delete: bool,vend
result_templates.append(create_rp_def(data, delete))
else:
result_templates.append(create_rp_statement(data, delete))
elif "acl_ruleset" in resource_key: # acl rules management
if "acl_ruleset" in resource_key: # acl rules management
result_templates.extend(acl_mgmt(resource_value,vendor, delete))
else:
if "ip_link" in resource_key:
result_templates.append(ip_link_mgmt(resource_value,vendor,delete))
return result_templates
\ No newline at end of file
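In the OpenConfig driver the routing above is purely string-based on the resource key; a hedged sketch of which builder each kind of key reaches (key formats are illustrative, not taken from this change):
#   '/network_instance[vpn1]'                            -> create_NI(...)
#   '/interface[eth-1/0/1]/subinterface[0]'              -> create_If_SubIf(...)
#   '/device[R1]/endpoint[eth-1/0/1]/acl_ruleset[acl1]'  -> acl_mgmt(...)
#   '/device[R1]/endpoint[eth-1/0/1]/ip_link_ruleset'    -> ip_link_mgmt(...)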
@@ -272,4 +272,105 @@ def cli_compose_config(resources, delete: bool, host: str, user: str, passw: str
# Close the SSH client
ssh_client.close()
\ No newline at end of file
def ufi_interface(resources, delete: bool): # Method used for configuring interfaces directly via CLI on UfiSpace devices
key_value_data = {}
for path, json_str in resources:
key_value_data[path] = json_str
# initialize the SSH client
ssh_client = paramiko.SSHClient()
ssh_client.load_system_host_keys()
# add to known hosts
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh_client.connect(hostname='10.95.90.75', username='dnroot', password='dnroot', look_for_keys=False)
LOGGER.warning("Connection successful")
except:
LOGGER.warning("[!] Cannot connect to the SSH Server")
exit()
interface = 'ge100-0/0/3/1'
ip = '1.1.1.1'
mask = '24'
vlan = '1212'
try:
# Open an SSH shell
channel = ssh_client.invoke_shell()
time.sleep(5)
channel.send('config\n')
time.sleep(1)
channel.send(f'interfaces {interface} \n')
time.sleep(1)
channel.send('admin-state enabled \n')
time.sleep(1)
channel.send(f'ipv4-address {ip}/{mask} \n')
time.sleep(1)
channel.send(f'vlan-id {vlan} \n')
time.sleep(1)
channel.send('commit\n')
time.sleep(1)
output = channel.recv(65535).decode('utf-8')
LOGGER.warning(output)
# Close the SSH shell
channel.close()
except Exception as e:
LOGGER.exception(f"Error with the CLI configuration: {e}")
# Close the SSH client
ssh_client.close()
def cisco_interface(resources, delete: bool, host: str, user: str, passw: str): # Method used for configuring interfaces directly via CLI on Cisco devices
key_value_data = {}
for path, json_str in resources:
key_value_data[path] = json_str
# initialize the SSH client
ssh_client = paramiko.SSHClient()
ssh_client.load_system_host_keys()
# add to known hosts
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh_client.connect(hostname='10.90.95.150', username='cisco', password='cisco123', look_for_keys=False)
LOGGER.warning("Connection successful")
except:
LOGGER.warning("[!] Cannot connect to the SSH Server")
exit()
interface = 'FourHundredGigE0/0/0/10.1212'
ip = '1.1.1.1'
mask = '24'
vlan = '1212'
try:
# Open an SSH shell
channel = ssh_client.invoke_shell()
time.sleep(1)
channel.send('config\n')
time.sleep(0.1)
channel.send(f'interface {interface} \n')
time.sleep(0.1)
channel.send('no shutdown\n')
time.sleep(0.1)
channel.send(f'ipv4 address {ip}/{mask} \n')
time.sleep(0.1)
channel.send(f'encapsulation dot1q {vlan} \n')
time.sleep(0.1)
channel.send('commit\n')
time.sleep(0.1)
output = channel.recv(65535).decode('utf-8')
LOGGER.warning(output)
# Close the SSH shell
channel.close()
except Exception as e:
LOGGER.exception(f"Error with the CLI configuration: {e}")
# Close the SSH client
ssh_client.close()
# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build, tag, and push the Docker image to the GitLab Docker registry
build osm_client:
variables:
IMAGE_NAME: 'osm_client' # name of the microservice
MOCK_IMAGE_NAME: 'mock_osm_nbi' # name of the mock
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: build
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
- docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
# Build mock images
- docker buildx build -t "$MOCK_IMAGE_NAME:$IMAGE_TAG" -f ./src/tests/tools/$MOCK_IMAGE_NAME/Dockerfile .
- docker tag "$MOCK_IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$MOCK_IMAGE_NAME:$IMAGE_TAG"
- docker push "$CI_REGISTRY_IMAGE/$MOCK_IMAGE_NAME:$IMAGE_TAG"
after_script:
- docker images --filter="dangling=true" --quiet | xargs -r docker rmi
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- manifests/${IMAGE_NAME}service.yaml
- src/tests/tools/$MOCK_IMAGE_NAME/**/*.{py,in,yml}
- src/tests/tools/$MOCK_IMAGE_NAME/Dockerfile
- .gitlab-ci.yml
# Apply unit test to the component
unit_test osm_client:
variables:
IMAGE_NAME: 'osm_client' # name of the microservice
MOCK_IMAGE_NAME: 'mock_osm_nbi'
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: unit_test
needs:
- build osm_client
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- docker ps -aq | xargs -r docker rm -f
- >
if docker network list | grep teraflowbridge; then
echo "teraflowbridge is already created";
else
docker network create -d bridge teraflowbridge;
fi
- >
if docker container ls | grep $IMAGE_NAME; then
docker rm -f $IMAGE_NAME;
else
echo "$IMAGE_NAME image is not in the system";
fi
- docker container prune -f
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker images --filter="dangling=true" --quiet | xargs -r docker rmi
- >
docker run --name $IMAGE_NAME -d -v "$PWD/src/$IMAGE_NAME/tests:/opt/results"
--network=teraflowbridge
--env LOG_LEVEL=DEBUG
--env FLASK_ENV=development
--env OSM_ADDRESS=mock_osm_nbi
$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- >
docker run --name $MOCK_IMAGE_NAME -d
--network=teraflowbridge
--env LOG_LEVEL=DEBUG
--env FLASK_ENV=development
$CI_REGISTRY_IMAGE/$MOCK_IMAGE_NAME:$IMAGE_TAG
- while ! docker logs $IMAGE_NAME 2>&1 | grep -q 'Configured Rules'; do sleep 1; done
- sleep 5 # Give extra time to container to get ready
- docker ps -a
- docker logs $IMAGE_NAME
- docker logs $MOCK_IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report_unitary.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker logs $IMAGE_NAME
- docker rm -f $IMAGE_NAME
- docker rm -f $MOCK_IMAGE_NAME
- docker network rm teraflowbridge
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml
@@ -18,7 +18,7 @@ FROM python:3.10.16-slim
# Install dependencies
RUN apt-get --yes --quiet --quiet update
RUN apt-get --yes --quiet --quiet install wget g++ git build-essential cmake make git \
libpcre2-dev python3-dev python3-pip python3-cffi curl software-properties-common libmagic1 libmagic-dev && \
rm -rf /var/lib/apt/lists/*
# Set Python to show logs as they occur
......