Commit d1712b4a authored by Lluis Gifre Renom

Initial implementation of ECOC'22 Demo

- not functional, just a skeleton to generate the demo paper.
- logic for disjoint paths to be implemented
parent f83b23a5
2 merge requests: !54 Release 2.0.0, !4 Compute component:
Showing 654 additions and 58 deletions
ecoc22 (new symlink, mode 0 → 120000) → src/tests/ecoc22/
@@ -15,8 +15,10 @@
 from enum import Enum

 class DeviceTypeEnum(Enum):
+    EMULATED_DATACENTER = 'emu-datacenter'
     EMULATED_OPTICAL_LINE_SYSTEM = 'emu-optical-line-system'
     EMULATED_PACKET_ROUTER = 'emu-packet-router'
+    DATACENTER = 'datacenter'
     OPTICAL_ROADM = 'optical-roadm'
     OPTICAL_TRANDPONDER = 'optical-trandponder'
     OPTICAL_LINE_SYSTEM = 'optical-line-system'
...
@@ -20,6 +20,7 @@ from context.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusE
 DEVICE_DISABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED

+DEVICE_EMUDC_TYPE = DeviceTypeEnum.EMULATED_DATACENTER.value
 DEVICE_EMUPR_TYPE = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
 DEVICE_EMUOLS_TYPE = DeviceTypeEnum.EMULATED_OPTICAL_LINE_SYSTEM.value
 DEVICE_EMU_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_UNDEFINED]

@@ -67,6 +68,14 @@ def json_device_emulated_tapi_disabled(
         device_uuid, DEVICE_EMUOLS_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules,
         drivers=drivers)

+def json_device_emulated_datacenter_disabled(
+        device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [],
+        drivers : List[Dict] = DEVICE_EMU_DRIVERS
+    ):
+    return json_device(
+        device_uuid, DEVICE_EMUDC_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules,
+        drivers=drivers)
+
 def json_device_packetrouter_disabled(
     device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [],
     drivers : List[Dict] = DEVICE_PR_DRIVERS
...
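
As a quick orientation, a minimal usage sketch of the new helper (not part of this commit; the device name 'DC1' is illustrative):

# Hypothetical usage sketch: build a disabled emulated-datacenter device descriptor.
dc1_descriptor = json_device_emulated_datacenter_disabled('DC1')
# The result is a plain JSON/dict device object typed 'emu-datacenter', suitable for
# loading into the Context/Device components while bootstrapping the demo scenario.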
@@ -21,34 +21,43 @@ DEFAULT_BGP_ROUTE_TARGET = '{:d}:{:d}'.format(DEFAULT_BGP_AS, 333)
 # device_uuid:endpoint_uuid => (
 #     device_uuid, endpoint_uuid, router_id, route_distinguisher, sub_if_index, address_ip, address_prefix)
 BEARER_MAPPINGS = {
-    'R1-INF:13/2/1': ('R1-INF', '13/2/1', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24),
-    'R2-EMU:13/2/1': ('R2-EMU', '13/2/1', '12.12.12.1', '65000:120', 450, '3.4.2.1', 24),
-    'R3-INF:13/2/1': ('R3-INF', '13/2/1', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24),
-    'R4-EMU:13/2/1': ('R4-EMU', '13/2/1', '22.22.22.1', '65000:220', 550, '3.4.1.1', 24),
-
-    'R1@D1:3/1': ('R1@D1', '3/1', '10.0.1.1', '65001:101', 100, '1.1.3.1', 24),
-    'R1@D1:3/2': ('R1@D1', '3/2', '10.0.1.1', '65001:101', 100, '1.1.3.2', 24),
-    'R1@D1:3/3': ('R1@D1', '3/3', '10.0.1.1', '65001:101', 100, '1.1.3.3', 24),
-    'R2@D1:3/1': ('R2@D1', '3/1', '10.0.1.2', '65001:102', 100, '1.2.3.1', 24),
-    'R2@D1:3/2': ('R2@D1', '3/2', '10.0.1.2', '65001:102', 100, '1.2.3.2', 24),
-    'R2@D1:3/3': ('R2@D1', '3/3', '10.0.1.2', '65001:102', 100, '1.2.3.3', 24),
-    'R3@D1:3/1': ('R3@D1', '3/1', '10.0.1.3', '65001:103', 100, '1.3.3.1', 24),
-    'R3@D1:3/2': ('R3@D1', '3/2', '10.0.1.3', '65001:103', 100, '1.3.3.2', 24),
-    'R3@D1:3/3': ('R3@D1', '3/3', '10.0.1.3', '65001:103', 100, '1.3.3.3', 24),
-    'R4@D1:3/1': ('R4@D1', '3/1', '10.0.1.4', '65001:104', 100, '1.4.3.1', 24),
-    'R4@D1:3/2': ('R4@D1', '3/2', '10.0.1.4', '65001:104', 100, '1.4.3.2', 24),
-    'R4@D1:3/3': ('R4@D1', '3/3', '10.0.1.4', '65001:104', 100, '1.4.3.3', 24),
-
-    'R1@D2:3/1': ('R1@D2', '3/1', '10.0.2.1', '65002:101', 100, '2.1.3.1', 24),
-    'R1@D2:3/2': ('R1@D2', '3/2', '10.0.2.1', '65002:101', 100, '2.1.3.2', 24),
-    'R1@D2:3/3': ('R1@D2', '3/3', '10.0.2.1', '65002:101', 100, '2.1.3.3', 24),
-    'R2@D2:3/1': ('R2@D2', '3/1', '10.0.2.2', '65002:102', 100, '2.2.3.1', 24),
-    'R2@D2:3/2': ('R2@D2', '3/2', '10.0.2.2', '65002:102', 100, '2.2.3.2', 24),
-    'R2@D2:3/3': ('R2@D2', '3/3', '10.0.2.2', '65002:102', 100, '2.2.3.3', 24),
-    'R3@D2:3/1': ('R3@D2', '3/1', '10.0.2.3', '65002:103', 100, '2.3.3.1', 24),
-    'R3@D2:3/2': ('R3@D2', '3/2', '10.0.2.3', '65002:103', 100, '2.3.3.2', 24),
-    'R3@D2:3/3': ('R3@D2', '3/3', '10.0.2.3', '65002:103', 100, '2.3.3.3', 24),
-    'R4@D2:3/1': ('R4@D2', '3/1', '10.0.2.4', '65002:104', 100, '2.4.3.1', 24),
-    'R4@D2:3/2': ('R4@D2', '3/2', '10.0.2.4', '65002:104', 100, '2.4.3.2', 24),
-    'R4@D2:3/3': ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24),
+    # OFC'22
+    #'R1-INF:13/2/1': ('R1-INF', '13/2/1', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24),
+    #'R2-EMU:13/2/1': ('R2-EMU', '13/2/1', '12.12.12.1', '65000:120', 450, '3.4.2.1', 24),
+    #'R3-INF:13/2/1': ('R3-INF', '13/2/1', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24),
+    #'R4-EMU:13/2/1': ('R4-EMU', '13/2/1', '22.22.22.1', '65000:220', 550, '3.4.1.1', 24),
+
+    # OECC/PSC'22 - domain 1
+    #'R1@D1:3/1': ('R1@D1', '3/1', '10.0.1.1', '65001:101', 100, '1.1.3.1', 24),
+    #'R1@D1:3/2': ('R1@D1', '3/2', '10.0.1.1', '65001:101', 100, '1.1.3.2', 24),
+    #'R1@D1:3/3': ('R1@D1', '3/3', '10.0.1.1', '65001:101', 100, '1.1.3.3', 24),
+    #'R2@D1:3/1': ('R2@D1', '3/1', '10.0.1.2', '65001:102', 100, '1.2.3.1', 24),
+    #'R2@D1:3/2': ('R2@D1', '3/2', '10.0.1.2', '65001:102', 100, '1.2.3.2', 24),
+    #'R2@D1:3/3': ('R2@D1', '3/3', '10.0.1.2', '65001:102', 100, '1.2.3.3', 24),
+    #'R3@D1:3/1': ('R3@D1', '3/1', '10.0.1.3', '65001:103', 100, '1.3.3.1', 24),
+    #'R3@D1:3/2': ('R3@D1', '3/2', '10.0.1.3', '65001:103', 100, '1.3.3.2', 24),
+    #'R3@D1:3/3': ('R3@D1', '3/3', '10.0.1.3', '65001:103', 100, '1.3.3.3', 24),
+    #'R4@D1:3/1': ('R4@D1', '3/1', '10.0.1.4', '65001:104', 100, '1.4.3.1', 24),
+    #'R4@D1:3/2': ('R4@D1', '3/2', '10.0.1.4', '65001:104', 100, '1.4.3.2', 24),
+    #'R4@D1:3/3': ('R4@D1', '3/3', '10.0.1.4', '65001:104', 100, '1.4.3.3', 24),
+
+    # OECC/PSC'22 - domain 2
+    #'R1@D2:3/1': ('R1@D2', '3/1', '10.0.2.1', '65002:101', 100, '2.1.3.1', 24),
+    #'R1@D2:3/2': ('R1@D2', '3/2', '10.0.2.1', '65002:101', 100, '2.1.3.2', 24),
+    #'R1@D2:3/3': ('R1@D2', '3/3', '10.0.2.1', '65002:101', 100, '2.1.3.3', 24),
+    #'R2@D2:3/1': ('R2@D2', '3/1', '10.0.2.2', '65002:102', 100, '2.2.3.1', 24),
+    #'R2@D2:3/2': ('R2@D2', '3/2', '10.0.2.2', '65002:102', 100, '2.2.3.2', 24),
+    #'R2@D2:3/3': ('R2@D2', '3/3', '10.0.2.2', '65002:102', 100, '2.2.3.3', 24),
+    #'R3@D2:3/1': ('R3@D2', '3/1', '10.0.2.3', '65002:103', 100, '2.3.3.1', 24),
+    #'R3@D2:3/2': ('R3@D2', '3/2', '10.0.2.3', '65002:103', 100, '2.3.3.2', 24),
+    #'R3@D2:3/3': ('R3@D2', '3/3', '10.0.2.3', '65002:103', 100, '2.3.3.3', 24),
+    #'R4@D2:3/1': ('R4@D2', '3/1', '10.0.2.4', '65002:104', 100, '2.4.3.1', 24),
+    #'R4@D2:3/2': ('R4@D2', '3/2', '10.0.2.4', '65002:104', 100, '2.4.3.2', 24),
+    #'R4@D2:3/3': ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24),
+
+    # ECOC'22
+    'CE1-PE1': ('PE1', '1/1', '10.0.0.101', '65000:101', 300, None, None),
+    'CE2-PE2': ('PE2', '1/1', '10.0.0.102', '65000:102', 300, None, None),
+    'CE3-PE3': ('PE3', '1/1', '10.0.0.103', '65000:103', 300, None, None),
+    'CE4-PE4': ('PE4', '1/1', '10.0.0.104', '65000:104', 300, None, None),
 }
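
For reference, a hedged sketch of how a bearer entry is consumed, following the tuple layout documented in the comment above (not part of this commit):

# Hypothetical lookup of one of the new ECOC'22 bearer references.
mapping = BEARER_MAPPINGS.get('CE1-PE1')
if mapping is not None:
    (device_uuid, endpoint_uuid, router_id, route_distinguisher,
     sub_if_index, address_ip, address_prefix) = mapping
    # For 'CE1-PE1': device PE1, endpoint 1/1, router-id 10.0.0.101, RD 65000:101,
    # sub-interface 300; address_ip/address_prefix are None, so no address is forced.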
@@ -41,7 +41,10 @@ class L2VPN_Service(Resource):
         LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id)))
         LOGGER.debug('Request: {:s}'.format(str(request)))

+        # TODO: HACK ECOC'22, to be corrected
         response = jsonify({})
+        response.status_code = HTTP_OK
+        return response

         try:
             target = get_service(self.context_client, vpn_id)
...
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import time, random
 from ctypes import Union
 import json, logging
 from typing import Dict
@@ -39,6 +40,12 @@ def process_site_network_access(context_client : ContextClient, site_network_acc
     vpn_id = site_network_access['vpn-attachment']['vpn-id']
     cvlan_id = site_network_access['connection']['tagged-interface']['dot1q-vlan-tagged']['cvlan-id']
     bearer_reference = site_network_access['bearer']['bearer-reference']
+    access_priority = site_network_access.get('availability', {}).get('access-priority')
+    single_active = site_network_access.get('availability', {}).get('single-active')
+    all_active = site_network_access.get('availability', {}).get('all-active')
+    diversity_constraints = site_network_access.get('access-diversity', {}).get('constraints', {}).get('constraint', [])
+    # TODO: manage targets of constraints, right now, only type of constraint is considered
+    diversity_constraints = [constraint['constraint-type'] for constraint in diversity_constraints]

     mapping = BEARER_MAPPINGS.get(bearer_reference)
     if mapping is None:
@@ -139,19 +146,29 @@ def process_site_network_access(context_client : ContextClient, site_network_acc
            raise Exception(msg.format(
                str(json_settings['vlan_id']), str(cvlan_id)))

-        if 'address_ip' not in json_settings:  # missing, add it
-            json_settings['address_ip'] = address_ip
-        elif json_settings['address_ip'] != address_ip:  # differs, raise exception
-            msg = 'Specified AddressIP({:s}) differs from Service AddressIP({:s})'
-            raise Exception(msg.format(
-                str(json_settings['address_ip']), str(address_ip)))
-
-        if 'address_prefix' not in json_settings:  # missing, add it
-            json_settings['address_prefix'] = address_prefix
-        elif json_settings['address_prefix'] != address_prefix:  # differs, raise exception
-            msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})'
-            raise Exception(msg.format(
-                str(json_settings['address_prefix']), str(address_prefix)))
+        if address_ip is not None:
+            if 'address_ip' not in json_settings:  # missing, add it
+                json_settings['address_ip'] = address_ip
+            elif json_settings['address_ip'] != address_ip:  # differs, raise exception
+                msg = 'Specified AddressIP({:s}) differs from Service AddressIP({:s})'
+                raise Exception(msg.format(
+                    str(json_settings['address_ip']), str(address_ip)))
+
+        if address_prefix is not None:
+            if 'address_prefix' not in json_settings:  # missing, add it
+                json_settings['address_prefix'] = address_prefix
+            elif json_settings['address_prefix'] != address_prefix:  # differs, raise exception
+                msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})'
+                raise Exception(msg.format(
+                    str(json_settings['address_prefix']), str(address_prefix)))
+
+        if address_prefix is not None:
+            if 'address_prefix' not in json_settings:  # missing, add it
+                json_settings['address_prefix'] = address_prefix
+            elif json_settings['address_prefix'] != address_prefix:  # differs, raise exception
+                msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})'
+                raise Exception(msg.format(
+                    str(json_settings['address_prefix']), str(address_prefix)))

         config_rule.resource_value = json.dumps(json_settings, sort_keys=True)
         break
@@ -160,14 +177,31 @@
     config_rule = target.service_config.config_rules.add()  # pylint: disable=no-member
     config_rule.action = ConfigActionEnum.CONFIGACTION_SET
     config_rule.resource_key = endpoint_settings_key
-    config_rule.resource_value = json.dumps({
+    resource_value = {
         'router_id': router_id,
         'route_distinguisher': route_distinguisher,
         'sub_interface_index': sub_if_index,
         'vlan_id': cvlan_id,
        'address_ip': address_ip,
        'address_prefix': address_prefix,
-    }, sort_keys=True)
+    }
+    if access_priority is not None: resource_value['access_priority'] = access_priority
+    if single_active is not None and len(single_active) > 0: resource_value['access_active'] = 'single'
+    if all_active is not None and len(all_active) > 0: resource_value['access_active'] = 'all'
+    config_rule.resource_value = json.dumps(resource_value, sort_keys=True)
+
+    for constraint in target.service_constraints:  # pylint: disable=no-member
+        if constraint.constraint_type == 'diversity' and len(diversity_constraints) > 0:
+            constraint_value = set(json.loads(constraint.constraint_value))
+            constraint_value.update(diversity_constraints)
+            constraint.constraint_value = json.dumps(sorted(list(constraint_value)), sort_keys=True)
+            break
+    else:
+        # not found, and there are diversity constraints, add them
+        if len(diversity_constraints) > 0:
+            constraint = target.service_constraints.add()  # pylint: disable=no-member
+            constraint.constraint_type = 'diversity'
+            constraint.constraint_value = json.dumps(sorted(list(diversity_constraints)), sort_keys=True)

     return target
@@ -183,21 +217,22 @@ def process_list_site_network_access(
     for site_network_access in request_data['ietf-l2vpn-svc:site-network-access']:
         sna_request = process_site_network_access(context_client, site_network_access)
         LOGGER.debug('sna_request = {:s}'.format(grpc_message_to_json_string(sna_request)))
-        try:
-            if isinstance(sna_request, Service):
-                sna_reply = service_client.UpdateService(sna_request)
-                if sna_reply != sna_request.service_id: # pylint: disable=no-member
-                    raise Exception('Service update failed. Wrong Service Id was returned')
-            elif isinstance(sna_request, Slice):
-                sna_reply = slice_client.UpdateSlice(sna_request)
-                if sna_reply != sna_request.slice_id: # pylint: disable=no-member
-                    raise Exception('Slice update failed. Wrong Slice Id was returned')
-            else:
-                raise NotImplementedError('Support for Class({:s}) not implemented'.format(str(type(sna_request))))
-        except Exception as e: # pylint: disable=broad-except
-            msg = 'Something went wrong Updating Service {:s}'
-            LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request)))
-            errors.append({'error': str(e)})
+        #try:
+        #    if isinstance(sna_request, Service):
+        #        sna_reply = service_client.UpdateService(sna_request)
+        #        if sna_reply != sna_request.service_id: # pylint: disable=no-member
+        #            raise Exception('Service update failed. Wrong Service Id was returned')
+        #    elif isinstance(sna_request, Slice):
+        #        sna_reply = slice_client.UpdateSlice(sna_request)
+        #        if sna_reply != sna_request.slice_id: # pylint: disable=no-member
+        #            raise Exception('Slice update failed. Wrong Slice Id was returned')
+        #    else:
+        #        raise NotImplementedError('Support for Class({:s}) not implemented'.format(str(type(sna_request))))
+        #except Exception as e: # pylint: disable=broad-except
+        #    msg = 'Something went wrong Updating Service {:s}'
+        #    LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request)))
+        #    errors.append({'error': str(e)})
+        time.sleep(random.random() / 10.0)

     response = jsonify(errors)
     response.status_code = HTTP_NOCONTENT if len(errors) == 0 else HTTP_SERVERERROR
...
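
To make the new fields concrete, a hedged sketch of a single site-network-access entry as the updated parser expects it (key names follow the .get() chains above; all values are illustrative):

# Hypothetical IETF L2VPN site-network-access fragment.
site_network_access = {
    'vpn-attachment': {'vpn-id': 'vpn-example'},
    'connection': {'tagged-interface': {'dot1q-vlan-tagged': {'cvlan-id': 300}}},
    'bearer': {'bearer-reference': 'CE1-PE1'},
    'availability': {
        'access-priority': 100,
        'single-active': [None],    # a non-empty list selects 'single' access_active
    },
    'access-diversity': {
        'constraints': {'constraint': [{'constraint-type': 'end-to-end-diverse'}]},
    },
}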
@@ -33,6 +33,7 @@ the Layer 2 service.
 import requests
 import uuid
 import logging
+import copy

 #from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
 from .sdnconn import SdnConnectorBase, SdnConnectorError
@@ -222,8 +223,29 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
                     http_code=response_service_creation.status_code,
                 )

-        """Second step, create the connections and vpn attachments"""
+        self.logger.info('connection_points = {:s}'.format(str(connection_points)))
+
+        # Check if protected paths are requested
+        extended_connection_points = []
         for connection_point in connection_points:
+            extended_connection_points.append(connection_point)
+
+            connection_point_wan_info = self.search_mapp(connection_point)
+            service_mapping_info = connection_point_wan_info.get('service_mapping_info', {})
+            redundant_service_endpoint_ids = service_mapping_info.get('redundant')
+
+            if redundant_service_endpoint_ids is None: continue
+            if len(redundant_service_endpoint_ids) == 0: continue
+
+            for redundant_service_endpoint_id in redundant_service_endpoint_ids:
+                redundant_connection_point = copy.deepcopy(connection_point)
+                redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id
+                extended_connection_points.append(redundant_connection_point)
+
+        self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points)))
+
+        """Second step, create the connections and vpn attachments"""
+        for connection_point in extended_connection_points:
             connection_point_wan_info = self.search_mapp(connection_point)
             site_network_access = {}
             connection = {}
@@ -264,6 +286,23 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
             site_network_access["bearer"] = connection_point_wan_info[
                 "service_mapping_info"
             ]["bearer"]

+            access_priority = connection_point_wan_info["service_mapping_info"].get("priority")
+            if access_priority is not None:
+                availability = {}
+                availability["access-priority"] = access_priority
+                availability["single-active"] = [None]
+                site_network_access["availability"] = availability
+
+                constraint = {}
+                constraint['constraint-type'] = 'end-to-end-diverse'
+                constraint['target'] = {'all-other-accesses': [None]}
+
+                access_diversity = {}
+                access_diversity['constraints'] = {'constraint': []}
+                access_diversity['constraints']['constraint'].append(constraint)
+                site_network_access["access-diversity"] = access_diversity
+
             site_network_accesses = {}
             site_network_access_list = []
             site_network_access_list.append(site_network_access)
...
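
For context, a hedged sketch of the WIM-side inputs this logic expects: a connection point plus the port-mapping entry returned by search_mapp(), whose service_mapping_info may carry the optional 'priority' and 'redundant' fields (identifiers are illustrative, not part of this commit):

# Hypothetical connection point received from OSM and its port-mapping entry.
connection_point = {'service_endpoint_id': 'ep-CE1-PE1'}
connection_point_wan_info = {
    'service_endpoint_id': 'ep-CE1-PE1',
    'service_mapping_info': {
        'bearer': {'bearer-reference': 'CE1-PE1'},
        'priority': 100,              # triggers the availability/access-diversity blocks above
        'redundant': ['ep-CE1-PE2'],  # cloned as extra connection points for the disjoint path
    },
}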
# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
descriptors_real.json
# ECOC'22 Demo - Disjoint DC-2-DC L3VPN Service
This functional test reproduces the experimental assessment of "<ECOC-22 title>" presented at [ECOC'22](https://www.ecoc2022.org/).
## Functional test folder
This functional test can be found in folder `./src/tests/ecoc22/`. A convenience alias `./ecoc22/` pointing to that folder has been defined.
## Execute with real devices
This functional test has only been tested with emulated devices; however, if you have access to real devices, you can modify the files `./ecoc22/tests/Objects.py` and `./ecoc22/tests/Credentials.py` to point to your devices and to map your network topology.
__Important:__ The OpenConfigDriver, the P4Driver, and the TransportApiDriver should be considered experimental. The configuration and monitoring capabilities they support are limited or partially implemented. Use them with care.
## Deployment
To run this functional test, it is assumed you have deployed a Kubernetes-based environment as described in [Wiki: Installing Kubernetes on your Linux machine](https://gitlab.com/teraflow-h2020/controller/-/wikis/Installing-Kubernetes-on-your-Linux-machine).
After installing Kubernetes, you can run the deployment script to deploy the appropriate components. Feel free to adapt it to your particular case following the instructions described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance).
__Important:__
- The `./ecoc22/deploy_in_kubernetes.sh` script assumes you have installed the appropriate development dependencies using the `install_development_dependencies.sh` script.
- Before running the scripts in this folder, remember to update the environment variable K8S_HOSTNAME to point to the Kubernetes node you will be using, as described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance).
For your convenience, the configuration script `./ecoc22/deploy_in_kubernetes.sh` has already been defined. The script will take some minutes to download the dependencies, build the micro-services, deploy them, and leave them ready for operation. The deployment finishes with a report of the items that have been created.
## Access to the WebUI and Dashboard
When the deployment completes, you can connect to the TeraFlow OS WebUI and Dashboards as described in [Wiki: Using the WebUI](https://gitlab.com/teraflow-h2020/controller/-/wikis/Using-the-WebUI), or directly navigating to `http://[your-node-ip]:30800` for the WebUI and `http://[your-node-ip]:30300` for the Grafana Dashboard.
Notes:
- the default credentials for the Grafana Dashboard are user/pass: `admin`/`admin123+`.
- this functional test does not involve the Monitoring component, so no monitoring data is plotted in Grafana.
## Test execution
To execute this functional test, four main steps need to be carried out:
1. Device bootstrapping
2. L3VPN Service creation
3. L3VPN Service removal
4. Cleanup
As the execution of each step progresses, a report is generated indicating PASSED / FAILED / SKIPPED. If an error occurs during the execution, you should see a detailed report on the error. See the troubleshooting section in that case.
Feel free to check the logs of the different components using the appropriate `ecoc22/show_logs_[component].sh` scripts after you execute each step.
### 1. Device bootstrapping
This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The expected results are:
- The devices to be incorporated into the Topology.
- The devices to be pre-configured and initialized as ENABLED by the Automation component.
- The monitoring of the device ports (named endpoints in TeraFlow OS) to be activated and data collection to start automatically.
- The links to be added to the topology.
To run this step, execute the following script:
`./ecoc22/run_test_01_bootstrap.sh`
When the script finishes, check the Grafana L3-Monitoring Dashboard; you should see the monitoring data being plotted and updated every 5 seconds (by default). Given that there is no service configured, you should see a 0-valued flat plot.
In the WebUI, select the "admin" Context. In the "Devices" tab you should see that 5 different emulated devices have been created and activated: 4 packet routers and 1 optical line system controller. Besides, in the "Services" tab you should see that there is no service created. Note that the emulated devices produce synthetic, randomly-generated data and do not care about the services configured.
### 2. L3VPN Service creation
This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance.
To run this step, execute the following script:
`./ecoc22/run_test_02_create_service.sh`
When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured, you should see the plots with the monitored data for the device. By default, device R1-INF is selected.
### 3. L3VPN Service removal
This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock OSM instance.
To run this step, execute the following script:
`./ecoc22/run_test_03_delete_service.sh`
When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed. Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again.
### 4. Cleanup
This last step just performs a cleanup of the scenario removing all the TeraFlow OS entities for completeness.
To run this step, execute the following script:
`./ecoc22/run_test_04_cleanup.sh`
When the script finishes, check the WebUI "Devices" tab; you should see that the devices have been removed. Besides, in the "Services" tab you can see that the "admin" Context has no services, given that the context has been removed.
## Troubleshooting
Different scripts are provided to help in troubleshooting issues in the execution of the test. These scripts are:
- `./ecoc22/show_deployment.sh`: this script reports the items belonging to this deployment. Use it to validate that all the pods, deployments and replica sets are ready and have a state of "running", and that the services are deployed and have appropriate IP addresses and ports.
- `ecoc22/show_logs_automation.sh`: this script reports the logs for the automation component.
- `ecoc22/show_logs_compute.sh`: this script reports the logs for the compute component.
- `ecoc22/show_logs_context.sh`: this script reports the logs for the context component.
- `ecoc22/show_logs_device.sh`: this script reports the logs for the device component.
- `ecoc22/show_logs_monitoring.sh`: this script reports the logs for the monitoring component.
- `ecoc22/show_logs_service.sh`: this script reports the logs for the service component.
- `ecoc22/show_logs_webui.sh`: this script reports the logs for the webui component.
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ECOC 22 deployment settings
export REGISTRY_IMAGE=""
export COMPONENTS="context device service slice compute webui"
export IMAGE_TAG="ecoc22"
export K8S_NAMESPACE="ecoc22"
export K8S_HOSTNAME="kubernetes-master"
export EXTRA_MANIFESTS="./ecoc22/expose_services.yaml"
export GRAFANA_PASSWORD="admin123+"
./deploy_in_kubernetes.sh
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export COMPONENTS="context device service slice compute webui"
export K8S_NAMESPACE="ecoc22"
mkdir -p tmp/exec_logs/$K8S_NAMESPACE/
rm tmp/exec_logs/$K8S_NAMESPACE/*
for COMPONENT in $COMPONENTS; do
kubectl --namespace $K8S_NAMESPACE logs deployment/${COMPONENT}service -c server > tmp/exec_logs/$K8S_NAMESPACE/$COMPONENT.log
done
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: v1
kind: Service
metadata:
  name: contextservice-public
  labels:
    app: contextservice
spec:
  type: NodePort
  selector:
    app: contextservice
  ports:
  - name: grpc
    protocol: TCP
    port: 1010
    targetPort: 1010
    nodePort: 30101
  - name: redis
    protocol: TCP
    port: 6379
    targetPort: 6379
    nodePort: 30637
  - name: http
    protocol: TCP
    port: 8080
    targetPort: 8080
    nodePort: 31808
---
apiVersion: v1
kind: Service
metadata:
  name: deviceservice-public
  labels:
    app: deviceservice
spec:
  type: NodePort
  selector:
    app: deviceservice
  ports:
  - name: grpc
    protocol: TCP
    port: 2020
    targetPort: 2020
    nodePort: 30202
---
apiVersion: v1
kind: Service
metadata:
  name: monitoringservice-public
  labels:
    app: monitoringservice
spec:
  type: NodePort
  selector:
    app: monitoringservice
  ports:
  - name: influx
    protocol: TCP
    port: 8086
    targetPort: 8086
    nodePort: 30886
---
apiVersion: v1
kind: Service
metadata:
  name: computeservice-public
spec:
  type: NodePort
  selector:
    app: computeservice
  ports:
  - name: http
    protocol: TCP
    port: 8080
    targetPort: 8080
    nodePort: 30808
---
apiVersion: v1
kind: Service
metadata:
  name: webuiservice-public
  labels:
    app: webuiservice
spec:
  type: NodePort
  selector:
    app: webuiservice
  ports:
  - name: http
    protocol: TCP
    port: 8004
    targetPort: 8004
    nodePort: 30800
  - name: grafana
    protocol: TCP
    port: 3000
    targetPort: 3000
    nodePort: 30300
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
COVERAGEFILE=$PROJECTDIR/coverage/.coverage
# Configure the correct folder on the .coveragerc file
cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE
# Destroy old coverage file
rm -f $COVERAGEFILE
# Set the name of the Kubernetes namespace and hostname to use.
K8S_NAMESPACE="ecoc22"
# K8S_HOSTNAME="kubernetes-master"
# dynamically gets the name of the K8s master node
K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
# Flush Context database
kubectl --namespace $K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL
export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
# Run functional test and analyze coverage of code at same time
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
tests/ecoc22/tests/test_functional_bootstrap.py
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
COVERAGEFILE=$PROJECTDIR/coverage/.coverage
# Set the name of the Kubernetes namespace and hostname to use.
K8S_NAMESPACE="ecoc22"
# dynamically gets the name of the K8s master node
K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
# Run functional test and analyze coverage of code at same time
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose -o log_cli=true \
tests/ecoc22/tests/test_functional_create_service.py
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
COVERAGEFILE=$PROJECTDIR/coverage/.coverage
# Set the name of the Kubernetes namespace and hostname to use.
K8S_NAMESPACE="ecoc22"
# dynamically gets the name of the K8s master node
K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
# Run functional test and analyze coverage of code at same time
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
tests/ecoc22/tests/test_functional_delete_service.py
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
COVERAGEFILE=$PROJECTDIR/coverage/.coverage
# Set the name of the Kubernetes namespace and hostname to use.
K8S_NAMESPACE="ecoc22"
# dynamically gets the name of the K8s master node
K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'`
export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}')
export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}')
export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}')
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
# Run functional test and analyze coverage of code at same time
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
tests/ecoc22/tests/test_functional_cleanup.py
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
K8S_NAMESPACE="ecoc22"
kubectl --namespace $K8S_NAMESPACE get all
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
K8S_NAMESPACE="ecoc22"
kubectl --namespace $K8S_NAMESPACE logs deployment/computeservice
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
K8S_NAMESPACE="ecoc22"
kubectl --namespace $K8S_NAMESPACE logs deployment/contextservice -c server