Commit 7a70472f authored by Lluis Gifre Renom

Merge branch 'feat/221-minor-corrections-in-gnmi-device-driver-unitary-tests-and-service' into 'develop'

Resolve "Minor corrections in gNMI device driver unitary tests and service"

See merge request !292
parents 20095d7f 9b6a8d4d
Related merge requests: !294 (Release TeraFlowSDN 4.0) and !292 (Resolve "Minor corrections in gNMI device driver unitary tests and service")
@@ -30,6 +30,8 @@ build dbscanserving:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
@@ -48,25 +50,33 @@ unit_test dbscanserving:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
- docker container prune -f
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 10008:10008 -v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results" --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
- >
docker run --name $IMAGE_NAME -d -p 10008:10008
--volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
--network=teraflowbridge
$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- docker ps -a
- sleep 5
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/${IMAGE_NAME}/results/${IMAGE_NAME}_report.xml"
- >
docker exec -i $IMAGE_NAME bash -c
"coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_unitary.py"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
- docker network rm teraflowbridge
- docker volume prune --force
- docker image prune --force
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
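The `coverage:` entry above is the regular expression GitLab uses to scrape the job's total coverage from the `coverage report` output. A quick sanity check of that pattern against a representative TOTAL row, as a hypothetical Python snippet (not part of this merge request):

import re

# GitLab strips the surrounding slashes; the pattern itself is used as-is.
COVERAGE_RE = re.compile(r'TOTAL\s+\d+\s+\d+\s+(\d+%)')

# Representative final row of `coverage report --show-missing`
# (columns: Name, Stmts, Miss, Cover, Missing):
sample = 'TOTAL     1234     56    95%'
match = COVERAGE_RE.search(sample)
assert match is not None and match.group(1) == '95%'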
@@ -78,29 +88,28 @@ unit_test dbscanserving:
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
# Deployment of the dbscanserving service in Kubernetes Cluster
# deploy dbscanserving:
# variables:
# IMAGE_NAME: 'dbscanserving' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: deploy
# needs:
# - unit test dbscanserving
# # - integ_test execute
# script:
# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
# - kubectl version
# - kubectl get all
# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
# - kubectl get all
# # environment:
# # name: test
# # url: https://example.com
# # kubernetes:
# # namespace: test
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# when: manual
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# when: manual
\ No newline at end of file
#deploy dbscanserving:
# variables:
# IMAGE_NAME: 'dbscanserving' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: deploy
# needs:
# - unit test dbscanserving
# # - integ_test execute
# script:
# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
# - kubectl version
# - kubectl get all
# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
# - kubectl get all
# # environment:
# # name: test
# # url: https://example.com
# # kubernetes:
# # namespace: test
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# when: manual
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# when: manual
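The commented-out deploy job pins the component image tag in the Kubernetes manifest with `sed` before applying it. A hypothetical Python equivalent of that substitution (the manifest line and registry below are made up for illustration):

import re

image_name, image_tag = 'dbscanserving', 'latest'
manifest_line = 'image: registry.example.org/tfs/dbscanserving:dev'  # hypothetical
pinned = re.sub(rf'{image_name}:.*', f'{image_name}:{image_tag}', manifest_line)
assert pinned == 'image: registry.example.org/tfs/dbscanserving:latest'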
@@ -27,19 +27,6 @@ RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
chmod +x /bin/grpc_health_probe
# Creating a user for security reasons
RUN groupadd -r teraflow && useradd -u 1001 --no-log-init -r -m -g teraflow teraflow
USER teraflow
# set working directory
RUN mkdir -p /home/teraflow/controller/common
WORKDIR /home/teraflow/controller
# Get Python packages per module
ENV VIRTUAL_ENV=/home/teraflow/venv
RUN python3 -m venv ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
# Get generic Python packages
RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install --upgrade setuptools wheel
@@ -47,36 +34,35 @@ RUN python3 -m pip install --upgrade pip-tools
# Get common Python packages
# Note: this step enables sharing the previous Docker build steps among all the Python components
COPY --chown=teraflow:teraflow common_requirements.in common_requirements.in
WORKDIR /var/teraflow
COPY common_requirements.in common_requirements.in
RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
RUN python3 -m pip install -r common_requirements.txt
# Add common files into working directory
WORKDIR /home/teraflow/controller/common
COPY --chown=teraflow:teraflow src/common/. ./
WORKDIR /var/teraflow/common
COPY src/common/. ./
RUN rm -rf proto
# Create proto sub-folder, copy .proto files, and generate Python code
RUN mkdir -p /home/teraflow/controller/common/proto
WORKDIR /home/teraflow/controller/common/proto
RUN mkdir -p /var/teraflow/common/proto
WORKDIR /var/teraflow/common/proto
RUN touch __init__.py
COPY --chown=teraflow:teraflow proto/*.proto ./
COPY proto/*.proto ./
RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
RUN rm *.proto
RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
# Create module sub-folders
RUN mkdir -p /home/teraflow/controller/dbscanserving
WORKDIR /home/teraflow/controller
# Get Python packages per module
COPY --chown=teraflow:teraflow ./src/dbscanserving/requirements.in dbscanserving/requirements.in
# consider common and specific requirements to avoid inconsistencies with dependencies
RUN pip-compile --quiet --output-file=dbscanserving/requirements.txt dbscanserving/requirements.in common_requirements.in
RUN python3 -m pip install -r dbscanserving/requirements.txt
# Create component sub-folders, get specific Python packages
RUN mkdir -p /var/teraflow/dbscanserving
WORKDIR /var/teraflow/dbscanserving
COPY src/dbscanserving/requirements.in requirements.in
RUN pip-compile --quiet --output-file=requirements.txt requirements.in
RUN python3 -m pip install -r requirements.txt
# Add component files into working directory
COPY --chown=teraflow:teraflow ./src/dbscanserving/. dbscanserving
WORKDIR /var/teraflow
COPY src/dbscanserving/. dbscanserving/
# Start the service
ENTRYPOINT ["python", "-m", "dbscanserving.service"]
@@ -14,10 +14,9 @@
import json, logging
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from common.DeviceTypes import DeviceTypeEnum
from common.method_wrappers.ServiceExceptions import NotFoundException
from typing import List
from common.proto.qkd_app_pb2 import QKDAppStatusEnum
from common.proto.context_pb2 import (
Connection, ConnectionId, Device, DeviceDriverEnum, DeviceId, Service, ServiceId,
......
@@ -14,13 +14,15 @@
import json, logging
from typing import Any, Dict, List, Optional, Tuple, Union
from common.proto.context_pb2 import ConfigRule, DeviceId, Service
from common.proto.context_pb2 import ConfigRule, ConnectionId, DeviceId, Service
from common.tools.object_factory.Connection import json_connection_id
from common.tools.object_factory.Device import json_device_id
from common.type_checkers.Checkers import chk_type
from service.service.service_handler_api._ServiceHandler import _ServiceHandler
from service.service.service_handler_api.SettingsHandler import SettingsHandler
from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
from .MockTaskExecutor import MockTaskExecutor
from service.service.tools.EndpointIdFormatters import endpointids_to_raw
from service.service.service_handlers.l3nm_gnmi_openconfig.ConfigRuleComposer import ConfigRuleComposer
from service.service.service_handlers.l3nm_gnmi_openconfig.StaticRouteGenerator import StaticRouteGenerator
@@ -48,17 +50,20 @@ class MockServiceHandler(_ServiceHandler):
device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
device_settings = self.__settings_handler.get_device_settings(device_obj)
self.__config_rule_composer.set_device_alias(device_obj.name, device_uuid)
_device = self.__config_rule_composer.get_device(device_obj.name)
_device.configure(device_obj, device_settings)
endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
_device.set_endpoint_alias(endpoint_obj.name, endpoint_uuid)
_endpoint = _device.get_endpoint(endpoint_obj.name)
_endpoint.configure(endpoint_obj, endpoint_settings)
self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name)
self.__static_route_generator.compose(endpoints)
LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
def _do_configurations(
self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]],
@@ -94,7 +99,12 @@ class MockServiceHandler(_ServiceHandler):
) -> List[Union[bool, Exception]]:
chk_type('endpoints', endpoints, list)
if len(endpoints) == 0: return []
self._compose_config_rules(endpoints)
#service_uuid = self.__service.service_id.service_uuid.uuid
connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid)))
connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids)
self._compose_config_rules(connection_endpoint_ids)
#network_instance_name = service_uuid.split('-')[0]
#config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=False)
config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False)
LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
results = self._do_configurations(config_rules_per_device, endpoints)
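With this change, SetEndpoint (and DeleteEndpoint below) composes config rules from the connection's path-hop endpoints instead of the service endpoints alone, so transit routers on the path are configured as well. A sketch of the conversion presumably performed by `endpointids_to_raw`, assuming it flattens `EndPointId` protos into the `(device_uuid, endpoint_uuid, topology_uuid)` tuples that `_compose_config_rules` expects; the real helper lives in `service.service.tools.EndpointIdFormatters`:

from typing import List, Optional, Tuple
from common.proto.context_pb2 import EndPointId

def endpointids_to_raw_sketch(
    endpoint_ids : List[EndPointId]
) -> List[Tuple[str, str, Optional[str]]]:
    # Hypothetical re-implementation for illustration only.
    raw : List[Tuple[str, str, Optional[str]]] = []
    for endpoint_id in endpoint_ids:
        device_uuid   = endpoint_id.device_id.device_uuid.uuid
        endpoint_uuid = endpoint_id.endpoint_uuid.uuid
        topology_uuid = endpoint_id.topology_id.topology_uuid.uuid or None
        raw.append((device_uuid, endpoint_uuid, topology_uuid))
    return raw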
@@ -106,7 +116,12 @@ class MockServiceHandler(_ServiceHandler):
) -> List[Union[bool, Exception]]:
chk_type('endpoints', endpoints, list)
if len(endpoints) == 0: return []
self._compose_config_rules(endpoints)
#service_uuid = self.__service.service_id.service_uuid.uuid
connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid)))
connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids)
self._compose_config_rules(connection_endpoint_ids)
#network_instance_name = service_uuid.split('-')[0]
#config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=True)
config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True)
LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
results = self._do_configurations(config_rules_per_device, endpoints, delete=True)
......
@@ -14,10 +14,12 @@
import logging
from enum import Enum
from typing import Dict, Optional, Union
from typing import Any, Dict, Optional, Union
from common.method_wrappers.ServiceExceptions import NotFoundException
from common.proto.context_pb2 import Connection, Device, DeviceId, Service
from service.service.tools.ObjectKeys import get_device_key
from common.proto.context_pb2 import (
Connection, ConnectionId, Device, DeviceId, Service
)
from service.service.tools.ObjectKeys import get_connection_key, get_device_key
LOGGER = logging.getLogger(__name__)
@@ -46,6 +48,32 @@ class MockTaskExecutor:
object_key = '{:s}:{:s}'.format(object_type.value, object_key)
self._grpc_objects_cache.pop(object_key, None)
def _store_editable_grpc_object(
self, object_type : CacheableObjectType, object_key : str, grpc_class, grpc_ro_object
) -> Any:
grpc_rw_object = grpc_class()
grpc_rw_object.CopyFrom(grpc_ro_object)
self._store_grpc_object(object_type, object_key, grpc_rw_object)
return grpc_rw_object
# ----- Connection-related methods ---------------------------------------------------------------------------------
def get_connection(self, connection_id : ConnectionId) -> Connection:
connection_key = get_connection_key(connection_id)
connection = self._load_grpc_object(CacheableObjectType.CONNECTION, connection_key)
if connection is None: raise NotFoundException('Connection', connection_key)
return connection
def set_connection(self, connection : Connection) -> None:
connection_key = get_connection_key(connection.connection_id)
self._store_grpc_object(CacheableObjectType.CONNECTION, connection_key, connection)
def delete_connection(self, connection_id : ConnectionId) -> None:
connection_key = get_connection_key(connection_id)
self._delete_grpc_object(CacheableObjectType.CONNECTION, connection_key)
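A minimal usage sketch of the new connection cache methods, assuming the `Connection`/`ConnectionId` protos imported above (the uuid value is made up):

from common.proto.context_pb2 import Connection

executor = MockTaskExecutor()
connection = Connection()
connection.connection_id.connection_uuid.uuid = 'conn'  # hypothetical uuid
executor.set_connection(connection)
cached = executor.get_connection(connection.connection_id)
assert cached.connection_id.connection_uuid.uuid == 'conn'
executor.delete_connection(connection.connection_id)  # get_connection now raises NotFoundException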
# ----- Device-related methods -------------------------------------------------------------------------------------
def get_device(self, device_id : DeviceId) -> Device:
device_key = get_device_key(device_id)
device = self._load_grpc_object(CacheableObjectType.DEVICE, device_key)
......
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run with:
# $ PYTHONPATH=./src python -m service.tests.test_l3nm_gnmi_static_rule_gen.test_unitary_sns4sns
import logging
from typing import List, Optional, Tuple
from common.DeviceTypes import DeviceTypeEnum
from common.proto.context_pb2 import Connection, Device, DeviceOperationalStatusEnum, Service
from common.tools.object_factory.ConfigRule import json_config_rule_set
from common.tools.object_factory.Connection import json_connection
from common.tools.object_factory.Device import json_device, json_device_id
from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
from common.tools.object_factory.Service import json_service_l3nm_planned
from .MockServiceHandler import MockServiceHandler
from .MockTaskExecutor import CacheableObjectType, MockTaskExecutor
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
SERVICE = Service(**json_service_l3nm_planned(
'svc-core-edge-uuid',
endpoint_ids=[
json_endpoint_id(json_device_id('core-net'), 'eth1'),
json_endpoint_id(json_device_id('edge-net'), 'eth1'),
],
config_rules=[
json_config_rule_set('/device[core-net]/endpoint[eth1]/settings', {
'address_ip': '10.10.10.0', 'address_prefix': 24, 'index': 0
}),
json_config_rule_set('/device[r1]/endpoint[eth10]/settings', {
'address_ip': '10.10.10.229', 'address_prefix': 24, 'index': 0
}),
json_config_rule_set('/device[r2]/endpoint[eth10]/settings', {
'address_ip': '10.158.72.229', 'address_prefix': 24, 'index': 0
}),
json_config_rule_set('/device[edge-net]/endpoint[eth1]/settings', {
'address_ip': '10.158.72.0', 'address_prefix': 24, 'index': 0
}),
]
))
CONNECTION_ENDPOINTS : List[Tuple[str, str, Optional[str]]] = [
#('core-net', 'int', None),
('core-net', 'eth1', None),
('r1', 'eth10', None), ('r1', 'eth2', None),
('r2', 'eth1', None), ('r2', 'eth10', None),
('edge-net', 'eth1', None),
#('edge-net', 'int', None),
]
def test_l3nm_gnmi_static_rule_gen() -> None:
dev_op_st_enabled = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
mock_task_executor = MockTaskExecutor()
mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'core-net', Device(**json_device(
'core-net', DeviceTypeEnum.EMULATED_DATACENTER.value, dev_op_st_enabled, name='core-net',
endpoints=[
json_endpoint(json_device_id('core-net'), 'int', 'packet', name='int' ),
json_endpoint(json_device_id('core-net'), 'eth1', 'packet', name='eth1'),
]
)))
mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'edge-net', Device(**json_device(
'edge-net', DeviceTypeEnum.EMULATED_DATACENTER.value, dev_op_st_enabled, name='edge-net',
endpoints=[
json_endpoint(json_device_id('edge-net'), 'int', 'packet', name='int' ),
json_endpoint(json_device_id('edge-net'), 'eth1', 'packet', name='eth1'),
]
)))
mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'r1', Device(**json_device(
'r1', DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, dev_op_st_enabled, name='r1',
endpoints=[
json_endpoint(json_device_id('r1'), 'eth2', 'packet', name='eth2' ),
json_endpoint(json_device_id('r1'), 'eth10', 'packet', name='eth10'),
]
)))
mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'r2', Device(**json_device(
'r2', DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, dev_op_st_enabled, name='r2',
endpoints=[
json_endpoint(json_device_id('r2'), 'eth1', 'packet', name='eth1' ),
json_endpoint(json_device_id('r2'), 'eth10', 'packet', name='eth10'),
]
)))
mock_task_executor._store_grpc_object(CacheableObjectType.CONNECTION, 'conn', Connection(**json_connection(
'conn', path_hops_endpoint_ids=[
json_endpoint_id(json_device_id(device_uuid), endpoint_uuid=endpoint_uuid)
for device_uuid, endpoint_uuid, _ in CONNECTION_ENDPOINTS
]
)))
mock_service_handler = MockServiceHandler(SERVICE, mock_task_executor)
mock_service_handler.SetEndpoint(CONNECTION_ENDPOINTS, connection_uuid='conn')
mock_service_handler.DeleteEndpoint(CONNECTION_ENDPOINTS, connection_uuid='conn')
if __name__ == '__main__':
test_l3nm_gnmi_static_rule_gen()