......@@ -24,6 +24,22 @@ ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalat
ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
# Some components require libyang built from source code
# - Ref: https://github.com/CESNET/libyang
# - Ref: https://github.com/CESNET/libyang-python/
echo "Installing libyang..."
sudo apt-get --yes --quiet --quiet update
sudo apt-get --yes --quiet --quiet install build-essential cmake libpcre2-dev python3-dev python3-cffi
mkdir libyang
git clone https://github.com/CESNET/libyang.git libyang
mkdir libyang/build
cd libyang/build
cmake -D CMAKE_BUILD_TYPE:String="Release" ..
make
sudo make install
sudo ldconfig
cd ../..
echo "Updating PIP, SetupTools and Wheel..."
pip install --upgrade pip # ensure next packages get the latest versions
pip install --upgrade setuptools wheel # bring basic tooling for other requirements
......
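A quick sanity check after the build can confirm that the shared library resolves and the Python bindings load; the sketch below assumes the CESNET `libyang` pip package (the bindings referenced above) has been installed:

# Sanity-check sketch (assumption: `pip install libyang`, the CESNET bindings).
import ctypes.util
import libyang

# After `sudo make install` and `sudo ldconfig`, the shared library must resolve.
assert ctypes.util.find_library('yang') is not None, 'libyang.so not found; re-run ldconfig'

# Creating (and destroying) a context exercises the CFFI bindings end to end.
ctx = libyang.Context()
ctx.destroy()
print('libyang OK')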
......@@ -41,6 +41,10 @@ spec:
value: "nats"
- name: LOG_LEVEL
value: "INFO"
- name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY
value: "FALSE"
- name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
value: "FALSE"
envFrom:
- secretRef:
name: crdb-data
......
#!/bin/bash
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=$(pwd)
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unit tests and analyze code coverage at the same time
# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_etsi_bwm.py
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_ietf_l2vpn.py
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_ietf_l3vpn.py
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_ietf_network.py
......@@ -20,5 +20,6 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unit tests and analyze code coverage at the same time
# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_unitary.py
nbi/tests/test_etsi_bwm.py
#!/bin/bash
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=$(pwd)
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unit tests and analyze code coverage at the same time
# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_ietf_l2vpn.py
#!/bin/bash
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=$(pwd)
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unit tests and analyze code coverage at the same time
# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_ietf_l3vpn.py
#!/bin/bash
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROJECTDIR=$(pwd)
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unit tests and analyze code coverage at the same time
# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
nbi/tests/test_ietf_network.py
......@@ -20,6 +20,7 @@ class DeviceTypeEnum(Enum):
NETWORK = 'network'
# Emulated device types
EMULATED_CLIENT = 'emu-client'
EMULATED_DATACENTER = 'emu-datacenter'
EMULATED_MICROWAVE_RADIO_SYSTEM = 'emu-microwave-radio-system'
EMULATED_OPEN_LINE_SYSTEM = 'emu-open-line-system'
......@@ -33,6 +34,7 @@ class DeviceTypeEnum(Enum):
EMULATED_XR_CONSTELLATION = 'emu-xr-constellation'
# Real device types
CLIENT = 'client'
DATACENTER = 'datacenter'
MICROWAVE_RADIO_SYSTEM = 'microwave-radio-system'
OPEN_LINE_SYSTEM = 'open-line-system'
......
......@@ -37,7 +37,9 @@ class InMemoryObjectDatabase:
LOGGER.debug('[get_entry] BEFORE database={:s}'.format(str(self._database)))
container = self._get_container(container_name)
if entry_uuid not in container:
context.abort(grpc.StatusCode.NOT_FOUND, str('{:s}({:s}) not found'.format(container_name, entry_uuid)))
MSG = '{:s}({:s}) not found; available({:s})'
msg = str(MSG.format(container_name, entry_uuid, str(container.keys())))
context.abort(grpc.StatusCode.NOT_FOUND, msg)
return container[entry_uuid]
def set_entry(self, container_name : str, entry_uuid : str, entry : Any) -> Any:
......
......@@ -26,6 +26,8 @@ from common.proto.context_pb2 import (
Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
from common.proto.context_pb2_grpc import ContextServiceServicer
from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
from common.tools.object_factory.Device import json_device_id
from common.tools.object_factory.Link import json_link_id
from .InMemoryObjectDatabase import InMemoryObjectDatabase
from .MockMessageBroker import (
TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY,
......@@ -143,17 +145,60 @@ class MockServicerImpl_Context(ContextServiceServicer):
def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
LOGGER.debug('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'topology[{:s}]'.format(str(request.topology_id.context_id.context_uuid.uuid))
context_uuid = str(request.topology_id.context_id.context_uuid.uuid)
container_name = 'topology[{:s}]'.format(context_uuid)
topology_uuid = request.topology_id.topology_uuid.uuid
if self.obj_db.has_entry(container_name, topology_uuid):
# merge device_ids and link_ids from database and request, and update request
db_topology = self.obj_db.get_entry(container_name, topology_uuid, context)
device_uuids = set()
for device_id in request.device_ids: device_uuids.add(device_id.device_uuid.uuid)
for device_id in db_topology.device_ids: device_uuids.add(device_id.device_uuid.uuid)
link_uuids = set()
for link_id in request.link_ids: link_uuids.add(link_id.link_uuid.uuid)
for link_id in db_topology.link_ids: link_uuids.add(link_id.link_uuid.uuid)
rw_request = Topology()
rw_request.CopyFrom(request)
del rw_request.device_ids[:]
for device_uuid in sorted(device_uuids):
rw_request.device_ids.append(DeviceId(**json_device_id(device_uuid)))
del rw_request.link_ids[:]
for link_uuid in sorted(link_uuids):
rw_request.link_ids.append(LinkId(**json_link_id(link_uuid)))
request = rw_request
reply,_ = self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY)
context_ = self.obj_db.get_entry('context', context_uuid, context)
for _topology_id in context_.topology_ids:
if _topology_id.topology_uuid.uuid == topology_uuid: break
else:
# topology not found, add it
context_.topology_ids.add().topology_uuid.uuid = topology_uuid
LOGGER.debug('[SetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
LOGGER.debug('[RemoveTopology] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid))
context_uuid = str(request.context_id.context_uuid.uuid)
container_name = 'topology[{:s}]'.format(context_uuid)
topology_uuid = request.topology_uuid.uuid
reply = self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context)
context_ = self.obj_db.get_entry('context', context_uuid, context)
for _topology_id in context_.topology_ids:
if _topology_id.topology_uuid.uuid == topology_uuid:
context_.topology_ids.remove(_topology_id)
break
LOGGER.debug('[RemoveTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
......@@ -368,17 +413,34 @@ class MockServicerImpl_Context(ContextServiceServicer):
def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
LOGGER.debug('[SetSlice] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'slice[{:s}]'.format(str(request.slice_id.context_id.context_uuid.uuid))
context_uuid = str(request.slice_id.context_id.context_uuid.uuid)
container_name = 'slice[{:s}]'.format(context_uuid)
slice_uuid = request.slice_id.slice_uuid.uuid
reply,_ = self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE)
context_ = self.obj_db.get_entry('context', context_uuid, context)
for _slice_id in context_.slice_ids:
if _slice_id.slice_uuid.uuid == slice_uuid: break
else:
# slice not found, add it
context_.slice_ids.add().slice_uuid.uuid = slice_uuid
LOGGER.debug('[SetSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty:
LOGGER.debug('[RemoveSlice] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid))
context_uuid = str(request.context_id.context_uuid.uuid)
container_name = 'slice[{:s}]'.format(context_uuid)
slice_uuid = request.slice_uuid.uuid
reply = self._del(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE, context)
context_ = self.obj_db.get_entry('context', context_uuid, context)
for _slice_id in context_.slice_ids:
if _slice_id.slice_uuid.uuid == slice_uuid:
context_.slice_ids.remove(_slice_id)
break
LOGGER.debug('[RemoveSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
......@@ -443,17 +505,34 @@ class MockServicerImpl_Context(ContextServiceServicer):
def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
LOGGER.debug('[SetService] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid))
context_uuid = str(request.service_id.context_id.context_uuid.uuid)
container_name = 'service[{:s}]'.format(context_uuid)
service_uuid = request.service_id.service_uuid.uuid
reply,_ = self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE)
context_ = self.obj_db.get_entry('context', context_uuid, context)
for _service_id in context_.service_ids:
if _service_id.service_uuid.uuid == service_uuid: break
else:
# service not found, add it
context_.service_ids.add().service_uuid.uuid = service_uuid
LOGGER.debug('[SetService] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
LOGGER.debug('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid))
context_uuid = str(request.context_id.context_uuid.uuid)
container_name = 'service[{:s}]'.format(context_uuid)
service_uuid = request.service_uuid.uuid
reply = self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context)
context_ = self.obj_db.get_entry('context', context_uuid, context)
for _service_id in context_.service_ids:
if _service_id.service_uuid.uuid == service_uuid:
context_.service_ids.remove(_service_id)
break
LOGGER.debug('[RemoveService] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
......
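The merge semantics introduced in SetTopology can be restated compactly; the helper below is illustrative only (not part of the change) and mirrors the union logic applied to device_ids and, analogously, link_ids:

# Illustrative sketch of the union SetTopology now stores when a topology
# entry already exists in the mock database.
from typing import Set
from common.proto.context_pb2 import Topology

def merged_device_uuids(db_topology : Topology, request : Topology) -> Set[str]:
    device_uuids = {d.device_uuid.uuid for d in db_topology.device_ids}
    device_uuids |= {d.device_uuid.uuid for d in request.device_ids}
    return device_uuids   # stored sorted, re-wrapped as DeviceId messages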
......@@ -65,7 +65,7 @@ def get_topology(
def get_topology_details(
context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
rw_copy : bool = False
) -> Optional[Topology]:
) -> Optional[TopologyDetails]:
try:
# pylint: disable=no-member
topology_id = TopologyId()
......
......@@ -240,11 +240,16 @@ class DescriptorLoader:
self._process_descr('slice', 'add', self.__ctx_cli.SetSlice, Slice, self.__slices )
self._process_descr('connection', 'add', self.__ctx_cli.SetConnection, Connection, self.__connections )
# Update context and topology is useless:
# - devices and links are assigned to topologies automatically by Context component
# - topologies, services, and slices are assigned to contexts automatically by Context component
# By default the Context component automatically assigns devices and links to topologies based on their
# endpoints, and assigns topologies, services, and slices to contexts based on their identifiers.
# The following statement is useless; up to now, no use case requires assigning a topology, service, or
# slice to a different context.
#self._process_descr('context', 'update', self.__ctx_cli.SetContext, Context, self.__contexts )
#self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies )
# In some cases, it might be needed to assign devices and links to multiple topologies; the
# following statement performs that assignment.
self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies )
#self.__ctx_cli.close()
......@@ -272,11 +277,16 @@ class DescriptorLoader:
self._process_descr('slice', 'add', self.__slc_cli.CreateSlice, Slice, self.__slices_add )
self._process_descr('slice', 'update', self.__slc_cli.UpdateSlice, Slice, self.__slices )
# Update context and topology is useless:
# - devices and links are assigned to topologies automatically by Context component
# - topologies, services, and slices are assigned to contexts automatically by Context component
# By default the Context component automatically assigns devices and links to topologies based on their
# endpoints, and assigns topologies, services, and slices to contexts based on their identifiers.
# The following statement is useless; up to now, no use case requires assigning a topology, service, or
# slice to a different context.
#self._process_descr('context', 'update', self.__ctx_cli.SetContext, Context, self.__contexts )
#self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies )
# In some cases, it might be needed to assign devices and links to multiple topologies; the
# following statement performs that assignment.
self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies )
#self.__slc_cli.close()
#self.__svc_cli.close()
......
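As an example of the re-enabled 'topology' update pass, a descriptor fragment such as the following (hypothetical identifiers) would attach an already-created device to a second topology; note it only takes effect server-side when the ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY flag described below is enabled:

# Hypothetical descriptor fragment: device "R1" already belongs to the topology
# derived from its endpoints; listing it under "backbone" lets the update pass
# add it to that topology as well.
descriptor = {
    'topologies': [{
        'topology_id': {
            'context_id': {'context_uuid': {'uuid': 'admin'}},
            'topology_uuid': {'uuid': 'backbone'},
        },
        'device_ids': [{'device_uuid': {'uuid': 'R1'}}],
        'link_ids': [],
    }],
}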
......@@ -57,6 +57,8 @@ def format_custom_config_rules(config_rules : List[Dict]) -> List[Dict]:
if isinstance(custom_resource_value, (dict, list)):
custom_resource_value = json.dumps(custom_resource_value, sort_keys=True, indent=0)
config_rule['custom']['resource_value'] = custom_resource_value
elif not isinstance(custom_resource_value, str):
config_rule['custom']['resource_value'] = str(custom_resource_value)
return config_rules
def format_device_custom_config_rules(device : Dict) -> Dict:
......
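A behavior sketch for the new branch (hypothetical rule values): dict and list values are JSON-encoded as before, while other non-string scalars are now stringified instead of being passed through untouched:

import json

rules = [
    {'custom': {'resource_key': '/a', 'resource_value': {'mtu': 1500}}},  # dict -> JSON
    {'custom': {'resource_key': '/b', 'resource_value': 42}},            # int  -> str
]
rules = format_custom_config_rules(rules)
assert rules[0]['custom']['resource_value'] == json.dumps({'mtu': 1500}, sort_keys=True, indent=0)
assert rules[1]['custom']['resource_value'] == '42'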
......@@ -37,7 +37,8 @@ class GenericRestServer(threading.Thread):
self.bind_port = bind_port
self.base_url = base_url
self.bind_address = get_http_bind_address() if bind_address is None else bind_address
self.endpoint = 'http://{:s}:{:s}{:s}'.format(str(self.bind_address), str(self.bind_port), str(self.base_url))
self.endpoint = 'http://{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
if self.base_url is not None: self.endpoint += str(self.base_url)
self.srv = None
self.ctx = None
self.app = Flask(__name__)
......
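Restated standalone for clarity, the endpoint composition above behaves as in this minimal sketch; previously a None base_url leaked into the URL as the literal string 'None':

def build_endpoint(bind_address, bind_port, base_url) -> str:
    # base_url is appended only when provided
    endpoint = 'http://{:s}:{:s}'.format(str(bind_address), str(bind_port))
    if base_url is not None: endpoint += str(base_url)
    return endpoint

assert build_endpoint('0.0.0.0', 8080, '/restconf') == 'http://0.0.0.0:8080/restconf'
assert build_endpoint('0.0.0.0', 8080, None) == 'http://0.0.0.0:8080'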
......@@ -12,3 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from common.Settings import get_setting
TRUE_VALUES = {'Y', 'YES', 'T', 'TRUE', 'E', 'ENABLE', 'ENABLED'}
def is_enabled(setting_name : str, default_value : bool) -> bool:
_is_enabled = get_setting(setting_name, default=None)
if _is_enabled is None: return default_value
str_is_enabled = str(_is_enabled).upper()
return str_is_enabled in TRUE_VALUES
DEFAULT_VALUE = False
ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY = is_enabled('ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY', DEFAULT_VALUE)
ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY = is_enabled('ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY', DEFAULT_VALUE)
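Usage sketch, assuming get_setting resolves environment variables as in common.Settings; the "FALSE" values come from the Deployment manifest hunk above. Note the module-level flags are evaluated once, at import time:

import os

os.environ['ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY'] = 'FALSE'
assert is_enabled('ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY', True) is False

os.environ['ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY'] = 'yes'    # matching is case-insensitive
assert is_enabled('ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY', False) is True

del os.environ['ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY']
assert is_enabled('ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY', True) is True   # default applies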
......@@ -102,14 +102,17 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link)
total_capacity_gbps, used_capacity_gbps = None, None
if request.HasField('attributes'):
attributes = request.attributes
# In proto3, HasField() does not work for scalar fields, so ListFields() is used instead.
attribute_names = set([field.name for field,_ in attributes.ListFields()])
if 'total_capacity_gbps' in attribute_names:
total_capacity_gbps = attributes.total_capacity_gbps
if 'used_capacity_gbps' in attribute_names:
used_capacity_gbps = attributes.used_capacity_gbps
elif total_capacity_gbps is not None:
used_capacity_gbps = total_capacity_gbps
else:
used_capacity_gbps = 0.0
link_data = [{
'link_uuid' : link_uuid,
......
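For context, a presence-detection sketch; the message name LinkAttributes is assumed from the usage above. proto3 rejects HasField() on plain scalar fields, and ListFields() reports only fields holding a non-default value:

from common.proto.context_pb2 import LinkAttributes  # assumed message name

attrs = LinkAttributes()
attrs.total_capacity_gbps = 100.0
set_fields = {field.name for field, _ in attrs.ListFields()}
assert 'total_capacity_gbps' in set_fields
assert 'used_capacity_gbps' not in set_fields   # default 0.0 is indistinguishable from unset

Hence the fallback chain above: a used_capacity_gbps explicitly set to 0.0 cannot be told apart from an unset field, and is replaced with total_capacity_gbps.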
......@@ -17,17 +17,20 @@ from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Session, selectinload, sessionmaker
from sqlalchemy_cockroachdb import run_transaction
from typing import Dict, List, Optional
from typing import Dict, List, Optional, Set
from common.proto.context_pb2 import (
ContextId, Empty, EventTypeEnum, Topology, TopologyDetails, TopologyId, TopologyIdList, TopologyList)
from common.message_broker.MessageBroker import MessageBroker
from common.method_wrappers.ServiceExceptions import NotFoundException
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.Topology import json_topology_id
from context.Config import ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY, ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
from .models.DeviceModel import DeviceModel
from .models.LinkModel import LinkModel
from .models.TopologyModel import TopologyDeviceModel, TopologyLinkModel, TopologyModel
from .uuids.Context import context_get_uuid
from .uuids.Device import device_get_uuid
from .uuids.Link import link_get_uuid
from .uuids.Topology import topology_get_uuid
from .Events import notify_event_context, notify_event_topology
......@@ -94,15 +97,40 @@ def topology_set(db_engine : Engine, messagebroker : MessageBroker, request : To
if len(topology_name) == 0: topology_name = request.topology_id.topology_uuid.uuid
context_uuid,topology_uuid = topology_get_uuid(request.topology_id, topology_name=topology_name, allow_random=True)
# Ignore request.device_ids and request.link_ids. They are used for retrieving devices and links added into the
# topology. Explicit addition into the topology is done automatically when creating the devices and links, based
# on the topologies specified in the endpoints associated with the devices and links.
# By default, ignore request.device_ids and request.link_ids. They are used for retrieving
# the devices and links added into the topology. Their addition to the topology is done
# automatically when the devices and links are created, based on the topologies specified
# in the endpoints associated with them.
# In some cases, it might be needed to add them explicitly; to allow that, activate flags
# ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY and/or ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY.
related_devices : List[Dict] = list()
if ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY:
device_uuids : Set[str] = set()
for device_id in request.device_ids:
device_uuid = device_get_uuid(device_id)
if device_uuid in device_uuids: continue # skip duplicates
related_devices.append({'topology_uuid': topology_uuid, 'device_uuid': device_uuid})
device_uuids.add(device_uuid)
else:
if len(request.device_ids) > 0: # pragma: no cover
LOGGER.warning('Items in field "device_ids" ignored. This field is used for retrieval purposes only.')
MSG = 'ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY={:s}; '.format(str(ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY))
MSG += 'Items in field "device_ids" ignored. This field is used for retrieval purposes only.'
LOGGER.warning(MSG)
related_links : List[Dict] = list()
if ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY:
link_uuids : Set[str] = set()
for link_id in request.link_ids:
link_uuid = link_get_uuid(link_id)
if link_uuid in link_uuids: continue # skip duplicates
related_links.append({'topology_uuid': topology_uuid, 'link_uuid': link_uuid})
link_uuids.add(link_uuid)
else:
if len(request.link_ids) > 0: # pragma: no cover
LOGGER.warning('Items in field "link_ids" ignored. This field is used for retrieval purposes only.')
MSG = 'ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY={:s}; '.format(str(ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY))
MSG += 'Items in field "link_ids" ignored. This field is used for retrieval purposes only.'
LOGGER.warning(MSG)
now = datetime.datetime.utcnow()
topology_data = [{
......@@ -124,7 +152,28 @@ def topology_set(db_engine : Engine, messagebroker : MessageBroker, request : To
)
stmt = stmt.returning(TopologyModel.created_at, TopologyModel.updated_at)
created_at,updated_at = session.execute(stmt).fetchone()
return updated_at > created_at
updated = updated_at > created_at
updated_topology_device = False
if len(related_devices) > 0:
stmt = insert(TopologyDeviceModel).values(related_devices)
stmt = stmt.on_conflict_do_nothing(
index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid]
)
topology_device_inserts = session.execute(stmt)
updated_topology_device = int(topology_device_inserts.rowcount) > 0
updated_topology_link = False
if len(related_links) > 0:
stmt = insert(TopologyLinkModel).values(related_links)
stmt = stmt.on_conflict_do_nothing(
index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid]
)
topology_link_inserts = session.execute(stmt)
updated_topology_link = int(topology_link_inserts.rowcount) > 0
return updated or updated_topology_device or updated_topology_link
updated = run_transaction(sessionmaker(bind=db_engine), callback)
context_id = json_context_id(context_uuid)
......
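With the flags enabled (e.g., setting the Deployment env vars above to "TRUE"), a client can attach an existing device to a topology explicitly; a sketch with hypothetical UUIDs:

from common.proto.context_pb2 import Topology
from context.client.ContextClient import ContextClient

topology = Topology()
topology.topology_id.context_id.context_uuid.uuid = 'admin'   # hypothetical
topology.topology_id.topology_uuid.uuid = 'backbone'          # hypothetical
topology.device_ids.add().device_uuid.uuid = 'R1'             # must reference an existing device

context_client = ContextClient()        # host/port resolved from environment
context_client.SetTopology(topology)    # upserts the (topology, device) membership row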
......@@ -100,8 +100,13 @@ def test_link(context_client : ContextClient) -> None:
attribute_names = set([field.name for field,_ in response.attributes.ListFields()])
assert 'total_capacity_gbps' in attribute_names
assert abs(response.attributes.total_capacity_gbps - 100) < 1.e-12
assert 'used_capacity_gbps' in attribute_names
assert abs(response.attributes.used_capacity_gbps - response.attributes.total_capacity_gbps) < 1.e-12
assert (
('used_capacity_gbps' not in attribute_names) or
(abs(response.attributes.used_capacity_gbps - response.attributes.total_capacity_gbps) < 1.e-12)
)
# ----- List when the object exists --------------------------------------------------------------------------------
response = context_client.ListLinkIds(Empty())
......
......@@ -48,15 +48,28 @@ unit_test nbi:
- build nbi
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
- >
if docker network list | grep teraflowbridge; then
echo "teraflowbridge is already created";
else
docker network create -d bridge teraflowbridge;
fi
- >
if docker container ls | grep $IMAGE_NAME; then
docker rm -f $IMAGE_NAME;
else
echo "$IMAGE_NAME image is not in the system";
fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 9090:9090 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_ietf_l2vpn.py --junitxml=/opt/results/${IMAGE_NAME}_report_ietf_l2vpn.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_ietf_network.py --junitxml=/opt/results/${IMAGE_NAME}_report_ietf_network.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_ietf_l3vpn.py --junitxml=/opt/results/${IMAGE_NAME}_report_ietf_l3vpn.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_etsi_bwm.py --junitxml=/opt/results/${IMAGE_NAME}_report_etsi_bwm.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
......@@ -77,7 +90,7 @@ unit_test nbi:
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml
## Deployment of the service in Kubernetes Cluster
#deploy nbi:
......
......@@ -53,6 +53,21 @@ RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
RUN rm *.proto
RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
# Download, build, and install libyang. Note that the APT package is outdated.
# - Ref: https://github.com/CESNET/libyang
# - Ref: https://github.com/CESNET/libyang-python/
RUN apt-get --yes --quiet --quiet update && \
apt-get --yes --quiet --quiet install build-essential cmake libpcre2-dev python3-dev python3-cffi && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/libyang
RUN git clone https://github.com/CESNET/libyang.git /var/libyang
RUN mkdir -p /var/libyang/build
WORKDIR /var/libyang/build
RUN cmake -D CMAKE_BUILD_TYPE:String="Release" ..
RUN make
RUN make install
RUN ldconfig
# Create component sub-folders, get specific Python packages
RUN mkdir -p /var/teraflow/nbi
WORKDIR /var/teraflow/nbi
......@@ -63,9 +78,14 @@ RUN python3 -m pip install -r requirements.txt
# Add component files into working directory
WORKDIR /var/teraflow
COPY src/nbi/. nbi/
COPY src/context/. context/
COPY src/service/. service/
COPY src/slice/. slice/
COPY src/context/__init__.py context/__init__.py
COPY src/context/client/. context/client/
COPY src/device/__init__.py device/__init__.py
COPY src/device/client/. device/client/
COPY src/service/__init__.py service/__init__.py
COPY src/service/client/. service/client/
COPY src/slice/__init__.py slice/__init__.py
COPY src/slice/client/. slice/client/
RUN mkdir -p /var/teraflow/tests/tools
COPY src/tests/tools/mock_osm/. tests/tools/mock_osm/
......