......@@ -31,7 +31,7 @@ include:
- local: '/src/dbscanserving/.gitlab-ci.yml'
- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
- local: '/src/opticalattackdetector/.gitlab-ci.yml'
- local: '/src/opticalattackmanager/.gitlab-ci.yml'
# - local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/automation/.gitlab-ci.yml'
- local: '/src/policy/.gitlab-ci.yml'
#- local: '/src/webui/.gitlab-ci.yml'
......
......@@ -14,6 +14,7 @@
import grpc, json, logging
from typing import Any, Dict, Iterator, List, Set
from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
from common.proto.context_pb2 import (
Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
Context, ContextEvent, ContextId, ContextIdList, ContextList,
......@@ -22,7 +23,7 @@ from common.proto.context_pb2 import (
Link, LinkEvent, LinkId, LinkIdList, LinkList,
Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
from common.proto.context_pb2_grpc import ContextServiceServicer
from common.tests.MockMessageBroker import (
TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY,
......@@ -162,6 +163,29 @@ class MockServicerImpl_Context(ContextServiceServicer):
LOGGER.info('[GetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def GetTopologyDetails(self, request : TopologyId, context : grpc.ServicerContext) -> TopologyDetails:
LOGGER.info('[GetTopologyDetails] request={:s}'.format(grpc_message_to_json_string(request)))
context_uuid = request.context_id.context_uuid.uuid
container_name = 'topology[{:s}]'.format(str(context_uuid))
topology_uuid = request.topology_uuid.uuid
_reply = get_entry(context, self.database, container_name, topology_uuid)
reply = TopologyDetails()
reply.topology_id.CopyFrom(_reply.topology_id)
reply.name = _reply.name
if context_uuid == DEFAULT_CONTEXT_NAME and topology_uuid == DEFAULT_TOPOLOGY_NAME:
for device in get_entries(self.database, 'device'): reply.devices.append(device)
for link in get_entries(self.database, 'link'): reply.links.append(link)
else:
# TODO: to be improved; Mock does not associate devices/links to topologies automatically
for device_id in _reply.device_ids:
device = get_entry(context, self.database, 'device', device_id.device_uuid.uuid)
reply.devices.append(device)
for link_id in _reply.link_ids:
link = get_entry(context, self.database, 'link', link_id.link_uuid.uuid)
reply.links.append(link)
LOGGER.info('[GetTopologyDetails] reply={:s}'.format(grpc_message_to_json_string(reply)))
return reply
def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
LOGGER.info('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
container_name = 'topology[{:s}]'.format(str(request.topology_id.context_id.context_uuid.uuid))
......
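The new GetTopologyDetails RPC can be exercised directly against the mock servicer. A minimal sketch, assuming the TFS common package is on the PYTHONPATH and that mock_context (a hypothetical name) is an already populated MockServicerImpl_Context instance:

from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
from common.proto.context_pb2 import TopologyId

# Request the default context/topology; in that branch the mock returns every
# stored device and link in the TopologyDetails reply.
request = TopologyId()
request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
request.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
# mock_context is hypothetical; None is accepted as the gRPC context here only
# because the topology entry exists and the mock never needs to abort.
details = mock_context.GetTopologyDetails(request, None)
print([device.name for device in details.devices], [link.name for link in details.links])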
......@@ -136,7 +136,7 @@ def json_device_tfs_disabled(
device_uuid, DEVICE_TFS_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules,
drivers=drivers)
def json_device_connect_rules(address : str, port : int, settings : Dict = {}):
def json_device_connect_rules(address : str, port : int, settings : Dict = {}) -> List[Dict]:
return [
json_config_rule_set('_connect/address', address),
json_config_rule_set('_connect/port', port),
......@@ -144,12 +144,7 @@ def json_device_connect_rules(address : str, port : int, settings : Dict = {}):
]
def json_device_emulated_connect_rules(
endpoint_descriptors : List[Tuple[str, str, List[int]]], address : str = DEVICE_EMU_ADDRESS,
port : int = DEVICE_EMU_PORT
):
settings = {'endpoints': [
{'uuid': endpoint_uuid, 'type': endpoint_type, 'sample_types': sample_types}
for endpoint_uuid,endpoint_type,sample_types in endpoint_descriptors
]}
endpoint_descriptors : List[Dict], address : str = DEVICE_EMU_ADDRESS, port : int = DEVICE_EMU_PORT
) -> List[Dict]:
settings = {'endpoints': endpoint_descriptors}
return json_device_connect_rules(address, port, settings=settings)
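json_device_emulated_connect_rules now embeds the endpoint descriptor dicts directly into the connect settings, so callers pass the same structures they hand to json_endpoints. A minimal sketch of the new call shape (raw dicts shown here; the tests build them with json_endpoint_descriptor, added in EndPoint.py below; sample-type values are illustrative):

from common.tools.object_factory.Device import json_device_emulated_connect_rules

# Old shape: [('EP1', '10Gbps', [101, 102])] tuples of (uuid, type, sample_types).
# New shape: one dict per endpoint; 'uuid' and 'type' are always present.
endpoint_descriptors = [
    {'uuid': 'EP1', 'type': '10Gbps', 'sample_types': [101, 102]},
    {'uuid': 'EP2', 'type': '10Gbps'},
]
connect_rules = json_device_emulated_connect_rules(endpoint_descriptors)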
......@@ -13,7 +13,20 @@
# limitations under the License.
import copy
from typing import Dict, List, Optional, Tuple
from typing import Dict, List, Optional
def json_endpoint_descriptor(
endpoint_uuid : str, endpoint_type : str, endpoint_name : Optional[str] = None,
sample_types : List[int] = [], location : Optional[Dict] = None
) -> Dict:
result = {'uuid': endpoint_uuid, 'type': endpoint_type}
if endpoint_name is not None:
result['name'] = endpoint_name
if sample_types is not None and len(sample_types) > 0:
result['sample_types'] = sample_types
if location is not None and len(location) > 0:
result['location'] = location
return result
def json_endpoint_id(device_id : Dict, endpoint_uuid : str, topology_id : Optional[Dict] = None):
result = {'device_id': copy.deepcopy(device_id), 'endpoint_uuid': {'uuid': endpoint_uuid}}
......@@ -21,11 +34,11 @@ def json_endpoint_id(device_id : Dict, endpoint_uuid : str, topology_id : Option
return result
def json_endpoint_ids(
device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]], topology_id : Optional[Dict] = None
device_id : Dict, endpoint_descriptors : List[Dict], topology_id : Optional[Dict] = None
):
return [
json_endpoint_id(device_id, endpoint_uuid, topology_id=topology_id)
for endpoint_uuid, _, _ in endpoint_descriptors
json_endpoint_id(device_id, endpoint_data['uuid'], topology_id=topology_id)
for endpoint_data in endpoint_descriptors
]
def json_endpoint(
......@@ -37,16 +50,18 @@ def json_endpoint(
'endpoint_id': json_endpoint_id(device_id, endpoint_uuid, topology_id=topology_id),
'endpoint_type': endpoint_type,
}
if len(kpi_sample_types) > 0: result['kpi_sample_types'] = copy.deepcopy(kpi_sample_types)
if location: result['endpoint_location'] = copy.deepcopy(location)
if kpi_sample_types is not None and len(kpi_sample_types) > 0:
result['kpi_sample_types'] = copy.deepcopy(kpi_sample_types)
if location is not None:
result['endpoint_location'] = copy.deepcopy(location)
return result
def json_endpoints(
device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]], topology_id : Optional[Dict] = None
device_id : Dict, endpoint_descriptors : List[Dict], topology_id : Optional[Dict] = None
):
return [
json_endpoint(
device_id, endpoint_uuid, endpoint_type, topology_id=topology_id,
kpi_sample_types=endpoint_sample_types)
for endpoint_uuid, endpoint_type, endpoint_sample_types in endpoint_descriptors
device_id, endpoint_data['uuid'], endpoint_data['type'], topology_id=topology_id,
kpi_sample_types=endpoint_data.get('sample_types'), location=endpoint_data.get('location'))
for endpoint_data in endpoint_descriptors
]
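The new json_endpoint_descriptor helper centralizes construction of these descriptor dicts for all the call sites updated below. A minimal usage sketch, assuming the TFS common package is importable (sample-type values are illustrative):

from common.tools.object_factory.Device import json_device_id
from common.tools.object_factory.EndPoint import json_endpoint_descriptor, json_endpoints

device_id = json_device_id('R1')
descriptors = [
    json_endpoint_descriptor('EP1', 'copper', sample_types=[101, 102]),
    json_endpoint_descriptor('EP2', 'optical'),
]
# Each descriptor is a plain dict, e.g. {'uuid': 'EP1', 'type': 'copper', 'sample_types': [101, 102]}
endpoints = json_endpoints(device_id, descriptors)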
......@@ -54,7 +54,7 @@ unit_test dbscanserving:
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
- docker exec ps -a
- docker ps -a
- sleep 5
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/${IMAGE_NAME}/results/${IMAGE_NAME}_report.xml"
......
......@@ -23,30 +23,27 @@ from common.proto.dbscanserving_pb2 import (DetectionRequest,
DetectionResponse, Sample)
from dbscanserving.client.DbscanServingClient import DbscanServingClient
from dbscanserving.Config import GRPC_SERVICE_PORT
from dbscanserving.service.DbscanService import DbscanService
port = 10000 + GRPC_SERVICE_PORT # avoid privileged ports
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
@pytest.fixture(scope="session")
def dbscanserving_service():
_service = DbscanService(port=port)
_service = DbscanService()
_service.start()
yield _service
_service.stop()
@pytest.fixture(scope="session")
def dbscanserving_client():
def dbscanserving_client(dbscanserving_service: DbscanService):
with patch.dict(
os.environ,
{
"DBSCANSERVINGSERVICE_SERVICE_HOST": "127.0.0.1",
"DBSCANSERVINGSERVICE_SERVICE_PORT_GRPC": str(port),
"DBSCANSERVINGSERVICE_SERVICE_PORT_GRPC": str(dbscanserving_service.bind_port),
},
clear=True,
):
......
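The client fixture now depends on the service fixture and reads the actual listening port from dbscanserving_service.bind_port instead of deriving one from GRPC_SERVICE_PORT. A self-contained illustration of how a dynamically assigned gRPC port is obtained with plain grpcio (illustrative only; the TFS service classes wrap this in their own base class):

from concurrent import futures
import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
bind_port = server.add_insecure_port('127.0.0.1:0')  # port 0: the OS picks a free port
server.start()
print('listening on 127.0.0.1:{:d}'.format(bind_port))
server.stop(None)  # stop immediately; no grace period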
......@@ -16,13 +16,17 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType
from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
from common.tools.object_factory.Device import (
json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id)
from common.tools.object_factory.EndPoint import json_endpoint_descriptor
from device.tests.CommonObjects import PACKET_PORT_SAMPLE_TYPES
DEVICE_EMU_UUID = 'R1-EMU'
DEVICE_EMU_ID = json_device_id(DEVICE_EMU_UUID)
DEVICE_EMU = json_device_emulated_packet_router_disabled(DEVICE_EMU_UUID)
DEVICE_EMU_EP_UUIDS = ['EP1', 'EP2', 'EP3', 'EP4']
DEVICE_EMU_EP_DESCS = [(ep_uuid, '10Gbps', PACKET_PORT_SAMPLE_TYPES) for ep_uuid in DEVICE_EMU_EP_UUIDS]
DEVICE_EMU_EP_DESCS = [
json_endpoint_descriptor(ep_uuid, '10Gbps', sample_types=PACKET_PORT_SAMPLE_TYPES)
for ep_uuid in DEVICE_EMU_EP_UUIDS
]
DEVICE_EMU_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_EMU_EP_DESCS)
RSRC_EP = '/endpoints/endpoint[{:s}]'
......@@ -30,7 +34,10 @@ RSRC_SUBIF = RSRC_EP + '/subinterfaces/subinterface[{:d}]'
RSRC_ADDRIPV4 = RSRC_SUBIF + '/ipv4/address[{:s}]'
DEVICE_EMU_ENDPOINTS_COOKED = []
for endpoint_uuid,endpoint_type,endpoint_sample_types in DEVICE_EMU_EP_DESCS:
for endpoint_data in DEVICE_EMU_EP_DESCS:
endpoint_uuid = endpoint_data['uuid']
endpoint_type = endpoint_data['type']
endpoint_sample_types = endpoint_data['sample_types']
endpoint_resource_key = RSRC_EP.format(str(endpoint_uuid))
sample_types = {}
for endpoint_sample_type in endpoint_sample_types:
......
......@@ -14,7 +14,7 @@
from common.tools.object_factory.Context import json_context, json_context_id
from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id
from common.tools.object_factory.EndPoint import json_endpoints
from common.tools.object_factory.EndPoint import json_endpoint_descriptor, json_endpoints
from common.tools.object_factory.Link import compose_link
from common.tools.object_factory.Topology import json_topology, json_topology_id
......@@ -22,7 +22,10 @@ def compose_device(
device_uuid, endpoint_uuids, endpoint_type='copper', endpoint_topology_id=None, endpoint_sample_types=[]
):
device_id = json_device_id(device_uuid)
endpoints = [(endpoint_uuid, endpoint_type, endpoint_sample_types) for endpoint_uuid in endpoint_uuids]
endpoints = [
json_endpoint_descriptor(endpoint_uuid, endpoint_type, sample_types=endpoint_sample_types)
for endpoint_uuid in endpoint_uuids
]
endpoints = json_endpoints(device_id, endpoints, topology_id=endpoint_topology_id)
device = json_device_emulated_packet_router_disabled(device_uuid, endpoints=endpoints)
return device_id, endpoints, device
......

......@@ -15,6 +15,7 @@
from common.proto.kpi_sample_types_pb2 import KpiSampleType
from common.tools.object_factory.Device import (
json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled)
from common.tools.object_factory.EndPoint import json_endpoint_descriptor
PACKET_PORT_SAMPLE_TYPES = [
KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED,
......@@ -25,6 +26,8 @@ PACKET_PORT_SAMPLE_TYPES = [
DEVICE_DEV1_UUID = 'DEV1'
ENDPOINT_END1_UUID = 'END1'
DEVICE_DEV1_ENDPOINT_DEFS = [(ENDPOINT_END1_UUID, 'copper', PACKET_PORT_SAMPLE_TYPES)]
DEVICE_DEV1_ENDPOINT_DEFS = [
json_endpoint_descriptor(ENDPOINT_END1_UUID, 'copper', sample_types=PACKET_PORT_SAMPLE_TYPES)
]
DEVICE_DEV1 = json_device_emulated_packet_router_disabled(DEVICE_DEV1_UUID)
DEVICE_DEV1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_DEV1_ENDPOINT_DEFS)
......@@ -49,9 +49,26 @@ unit_test opticalattackdetector:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
- if docker container ls | grep redis; then docker rm -f redis; else echo "redis image is not in the system"; fi
- if docker container ls | grep dbscanserving; then docker rm -f dbscanserving; else echo "dbscanserving image is not in the system"; fi
script:
- export REDIS_PASSWORD=$(uuidgen)
- docker pull "redis:7.0-alpine"
- docker run --name redis -d --network=teraflowbridge -p 16379:6379 -e REDIS_PASSWORD=${REDIS_PASSWORD} --rm redis:7.0-alpine redis-server --requirepass ${REDIS_PASSWORD}
- docker logs redis
- REDIS_ADDRESS=$(docker inspect redis --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- docker pull "$CI_REGISTRY_IMAGE/dbscanserving:$IMAGE_TAG"
- docker run --name dbscanserving -d -p 10008:10008 --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/dbscanserving:$IMAGE_TAG "python -m dbscanserving.service"
- docker logs dbscanserving
- DBSCANSERVING_ADDRESS=$(docker inspect dbscanserving --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 10006:10006 --env-file "$PWD/src/$IMAGE_NAME/.env" -v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results" --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- >
docker run --name $IMAGE_NAME -d -p 10006:10006
-v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results"
-e REDIS_PASSWORD=${REDIS_PASSWORD}
-e DBSCANSERVINGSERVICE_SERVICE_HOST=${DBSCANSERVING_ADDRESS}
-e CACHINGSERVICE_SERVICE_HOST=${REDIS_ADDRESS}
--network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
......@@ -59,6 +76,8 @@ unit_test opticalattackdetector:
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
- docker rm -f redis
- docker rm -f dbscanserving
- docker network rm teraflowbridge
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
......
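The job now provisions Redis and dbscanserving containers on the teraflowbridge network and injects their addresses plus the generated Redis password into the component under test via environment variables. A hypothetical standalone check of that wiring from inside the same network (the real service resolves these settings through its own helpers):

import os
import redis  # third-party client: pip install redis

client = redis.Redis(
    host=os.environ.get('CACHINGSERVICE_SERVICE_HOST', 'localhost'),
    port=6379,  # container-internal port; 16379 is only the host-side mapping
    password=os.environ.get('REDIS_PASSWORD'),
)
print(client.ping())  # True when the injected address and password are correct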
......@@ -13,88 +13,69 @@
# limitations under the License.
import logging
import os
import uuid
import queue
from unittest.mock import patch
import pytest
from common.Constants import (DEFAULT_GRPC_GRACE_PERIOD,
DEFAULT_GRPC_MAX_WORKERS)
from common.proto import dbscanserving_pb2 as dbscan
from common.proto.optical_attack_detector_pb2 import DetectionRequest
from common.tests.MockServicerImpl_Monitoring import MockServicerImpl_Monitoring
from opticalattackdetector.client.OpticalAttackDetectorClient import \
OpticalAttackDetectorClient
from opticalattackdetector.Config import GRPC_SERVICE_PORT
from opticalattackdetector.service.OpticalAttackDetectorService import \
OpticalAttackDetectorService
# from .example_objects import CONTEXT_ID, CONTEXT_ID_2, SERVICE_DEV1_DEV2
port = 10000 + GRPC_SERVICE_PORT # avoid privileged ports
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
@pytest.fixture(scope="session")
def optical_attack_detector_service():
with patch.dict(
os.environ,
{
"OPTICALATTACKDETECTORSERVICE_SERVICE_HOST": "127.0.0.1",
"OPTICALATTACKDETECTORSERVICE_SERVICE_PORT_GRPC": str(port),
"DBSCANSERVINGSERVICE_SERVICE_HOST": "127.0.0.1",
"DBSCANSERVINGSERVICE_SERVICE_PORT_GRPC": str(port),
},
clear=True,
):
_service = OpticalAttackDetectorService(
port=port,
max_workers=DEFAULT_GRPC_MAX_WORKERS,
grace_period=DEFAULT_GRPC_GRACE_PERIOD,
)
_service.start()
yield _service
_service.stop()
_service = OpticalAttackDetectorService()
_service.start()
yield _service
_service.stop()
@pytest.fixture(scope="session")
def optical_attack_detector_client(optical_attack_detector_service):
with patch.dict(
os.environ,
{
"OPTICALATTACKDETECTORSERVICE_SERVICE_HOST": "127.0.0.1",
"OPTICALATTACKDETECTORSERVICE_SERVICE_PORT_GRPC": str(port),
"DBSCANSERVINGSERVICE_SERVICE_HOST": "127.0.0.1",
"DBSCANSERVINGSERVICE_SERVICE_PORT_GRPC": str(port),
},
clear=True,
):
_client = OpticalAttackDetectorClient()
yield _client
def optical_attack_detector_client(optical_attack_detector_service: OpticalAttackDetectorService):
_client = OpticalAttackDetectorClient(
host=optical_attack_detector_service.bind_address,
port=optical_attack_detector_service.bind_port,
)
yield _client
_client.close()
def test_detect_attack(
optical_attack_detector_service: OpticalAttackDetectorService,
optical_attack_detector_client: OpticalAttackDetectorClient,
):
message = dbscan.DetectionResponse()
message.cluster_indices.extend([0, 1, -1, -1, -1])
monitoring_mock = MockServicerImpl_Monitoring(queue_samples = queue.Queue())
with patch(
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.attack_mitigator_client"
) as mitigator, patch(
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.monitoring_client"
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.monitoring_client",
monitoring_mock,
) as monitoring, patch(
"opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.dbscanserving_client.Detect",
# TODO: return dumb object with "cluster_indices" attribute
# idea: create new response object
return_value=message,
) as dbscanserving:
request: DetectionRequest = DetectionRequest()
request.service_id.context_id.context_uuid.uuid = ""
request.service_id.service_uuid.uuid = str("")
request.kpi_id.kpi_id.uuid = ""
optical_attack_detector_client.DetectAttack(request)
mitigator.NotifyAttack.assert_called_once()
monitoring.IncludeKpi.assert_called_once()
dbscanserving.assert_called_once()
for _ in range(10):
request: DetectionRequest = DetectionRequest()
request.service_id.context_id.context_uuid.uuid = str(uuid.uuid4())
request.service_id.service_uuid.uuid = str(uuid.uuid4())
request.kpi_id.kpi_id.uuid = "1"
optical_attack_detector_client.DetectAttack(request)
dbscanserving.assert_called_once()
monitoring.IncludeKpi.assert_called_once()
mitigator.NotifyAttack.assert_called()
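The rewritten test supplies a pre-built MockServicerImpl_Monitoring as the replacement object for the monitoring client, while the mitigator client is still auto-mocked. A self-contained illustration of the two patch() forms (standard library only; the patched targets are arbitrary examples, not TFS code):

import os
from unittest.mock import MagicMock, patch

# Form 1: supply the replacement yourself; 'as' binds that same object.
replacement = MagicMock(return_value='/tmp/fake')
with patch('os.getcwd', replacement) as patched:
    assert os.getcwd() == '/tmp/fake'
    assert patched is replacement
    patched.assert_called_once()

# Form 2: let patch() create a MagicMock and configure it afterwards.
with patch('os.path.exists') as auto_mock:
    auto_mock.return_value = True
    assert os.path.exists('/nonexistent') is True
    auto_mock.assert_called_once_with('/nonexistent')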
......@@ -52,11 +52,11 @@ unit_test opticalattackmanager:
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 10005:10005 -v "$PWD/src/$IMAGE_NAME/tests:/home/teraflow/$IMAGE_NAME/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- docker run --name $IMAGE_NAME -d -p 10005:10005 -e LOG_LEVEL=DEBUG -v "$PWD/src/$IMAGE_NAME/tests:/home/teraflow/controller/$IMAGE_NAME/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- docker ps -a
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/teraflow/$IMAGE_NAME/results/${IMAGE_NAME}_report.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=DEBUG --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/home/teraflow/controller/$IMAGE_NAME/results/${IMAGE_NAME}_report.xml; coverage report --include='${IMAGE_NAME}/*' --show-missing"
- ls -la src/$IMAGE_NAME/tests
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
......@@ -74,9 +74,9 @@ unit_test opticalattackmanager:
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
# Deployment of the service in Kubernetes Cluster
# deploy opticalattackmanager:
......
......@@ -13,108 +13,27 @@
# limitations under the License.
import logging
import os
from unittest.mock import patch
import uuid
import pytest
from opticalattackmanager.utils.monitor import delegate_services
from common.Constants import (DEFAULT_GRPC_GRACE_PERIOD,
DEFAULT_GRPC_MAX_WORKERS,
DEFAULT_SERVICE_GRPC_PORTS, ServiceNameEnum)
from common.proto import dbscanserving_pb2 as dbscan
from common.proto.optical_attack_detector_pb2 import DetectionRequest
from opticalattackdetector.client.OpticalAttackDetectorClient import \
OpticalAttackDetectorClient
from opticalattackdetector.Config import GRPC_SERVICE_PORT
from opticalattackdetector.service.OpticalAttackDetectorService import \
OpticalAttackDetectorService
# from .example_objects import CONTEXT_ID, CONTEXT_ID_2, SERVICE_DEV1_DEV2
port = 10000 + GRPC_SERVICE_PORT # avoid privileged ports
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
@pytest.fixture(scope="session")
def optical_attack_detector_service():
with patch.dict(
os.environ,
def test_delegate_services():
service_list = [
{
"OPTICALATTACKDETECTORSERVICE_SERVICE_HOST": "127.0.0.1",
"OPTICALATTACKDETECTORSERVICE_SERVICE_PORT_GRPC": str(
1000
+ DEFAULT_SERVICE_GRPC_PORTS.get(
ServiceNameEnum.OPTICALATTACKDETECTOR.value
)
),
"OPTICALATTACKMITIGATORSERVICE_SERVICE_HOST": "127.0.0.1",
"OPTICALATTACKMITIGATORSERVICE_SERVICE_PORT_GRPC": str(
1000
+ DEFAULT_SERVICE_GRPC_PORTS.get(
ServiceNameEnum.OPTICALATTACKMITIGATOR.value
)
),
"DBSCANSERVINGSERVICE_SERVICE_HOST": "127.0.0.1",
"DBSCANSERVINGSERVICE_SERVICE_PORT_GRPC": str(
1000
+ DEFAULT_SERVICE_GRPC_PORTS.get(ServiceNameEnum.DBSCANSERVING.value)
),
},
clear=True,
):
_service = OpticalAttackDetectorService(
port=port,
max_workers=DEFAULT_GRPC_MAX_WORKERS,
grace_period=DEFAULT_GRPC_GRACE_PERIOD,
)
# mocker_context_client = mock.patch('opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.context_client')
# mocker_context_client.start()
# mocker_influx_db = mock.patch('opticalattackdetector.service.OpticalAttackDetectorServiceServicerImpl.influxdb_client')
# mocker_influx_db.start()
_service.start()
yield _service
_service.stop()
# mocker_context_client.stop()
# mocker_influx_db.stop()
@pytest.fixture(scope="session")
def optical_attack_detector_client(optical_attack_detector_service):
with patch.dict(
os.environ,
{
"OPTICALATTACKDETECTORSERVICE_SERVICE_HOST": "127.0.0.1",
"OPTICALATTACKDETECTORSERVICE_SERVICE_PORT_GRPC": str(
1000
+ DEFAULT_SERVICE_GRPC_PORTS.get(
ServiceNameEnum.OPTICALATTACKDETECTOR.value
)
),
"OPTICALATTACKMITIGATORSERVICE_SERVICE_HOST": "127.0.0.1",
"OPTICALATTACKMITIGATORSERVICE_SERVICE_PORT_GRPC": str(
1000
+ DEFAULT_SERVICE_GRPC_PORTS.get(
ServiceNameEnum.OPTICALATTACKMITIGATOR.value
)
),
"DBSCANSERVINGSERVICE_SERVICE_HOST": "127.0.0.1",
"DBSCANSERVINGSERVICE_SERVICE_PORT_GRPC": str(
1000
+ DEFAULT_SERVICE_GRPC_PORTS.get(ServiceNameEnum.DBSCANSERVING.value)
),
},
clear=True,
):
_client = OpticalAttackDetectorClient()
yield _client
_client.close()
def test_detect_attack(
optical_attack_detector_client: OpticalAttackDetectorClient,
):
LOGGER.info("placeholder")
"context": uuid.uuid4(),
"service": uuid.uuid4(),
"kpi": 10,
}
for _ in range(10)]
delegate_services(
service_list=service_list,
start_index=0,
end_index=9,
host="127.0.0.1",
port="10006",
monitoring_interval=10,
)
......@@ -17,7 +17,6 @@ import logging
import traceback
from grpclib.client import Channel
from prometheus_client import Counter
from common.proto.asyncio.optical_attack_detector_grpc import \
OpticalAttackDetectorServiceStub
......
......@@ -33,7 +33,6 @@ build opticalattackmitigator:
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- src/$IMAGE_NAME/tests/Dockerfile
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
......@@ -48,7 +47,7 @@ unit_test opticalattackmitigator:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 10007:10007 -v "$PWD/src/$IMAGE_NAME/tests:/home/${IMAGE_NAME}/results" --network=teraflowbridge --rm $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
......@@ -66,8 +65,7 @@ unit_test opticalattackmitigator:
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- src/$IMAGE_NAME/tests/Dockerfile
- manifests/$IMAGE_NAMEservice.yaml
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
......
......@@ -17,29 +17,20 @@ import os
from unittest.mock import patch
import pytest
from common.Constants import (DEFAULT_GRPC_GRACE_PERIOD,
DEFAULT_GRPC_MAX_WORKERS)
from common.proto.optical_attack_mitigator_pb2 import AttackDescription
from opticalattackmitigator.client.OpticalAttackMitigatorClient import \
OpticalAttackMitigatorClient
from opticalattackmitigator.Config import GRPC_SERVICE_PORT
from opticalattackmitigator.service.OpticalAttackMitigatorService import \
OpticalAttackMitigatorService
port = 10000 + GRPC_SERVICE_PORT # avoid privileged ports
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
@pytest.fixture(scope="session")
def optical_attack_mitigator_service():
_service = OpticalAttackMitigatorService(
port=port,
max_workers=DEFAULT_GRPC_MAX_WORKERS,
grace_period=DEFAULT_GRPC_GRACE_PERIOD,
)
_service = OpticalAttackMitigatorService()
_service.start()
yield _service
_service.stop()
......@@ -51,7 +42,7 @@ def optical_attack_mitigator_client(optical_attack_mitigator_service):
os.environ,
{
"OPTICALATTACKMITIGATORSERVICE_SERVICE_HOST": "127.0.0.1",
"OPTICALATTACKMITIGATORSERVICE_SERVICE_PORT_GRPC": str(port),
"OPTICALATTACKMITIGATORSERVICE_SERVICE_PORT_GRPC": str(optical_attack_mitigator_service.bind_port),
},
clear=True,
):
......
......@@ -116,12 +116,10 @@ def eropath_to_hops(
if link_tuple is None: raise Exception('Malformed path')
ingress = next(iter([
ep_id for ep_id in link_tuple[0]['link_endpoint_ids']
if (ep_id['endpoint_id']['device_id'] == device_uuid) and\
(ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid)
ep_id
for ep_id in link_tuple[0]['link_endpoint_ids']
if ep_id['endpoint_id']['device_id'] != device_uuid
]), None)
if ingress['endpoint_id']['device_id'] != device_uuid:
raise Exception('Malformed path')
ingress_ep = ingress['endpoint_id']['endpoint_uuid']
ingress_ep = MAP_TAPI_UUIDS.get(ingress_ep, ingress_ep)
......
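The revised comprehension selects, from the link's endpoint ids, the one attached to a device other than the one currently being processed. A self-contained sketch of that selection with illustrative data:

link_endpoint_ids = [
    {'endpoint_id': {'device_id': 'R1', 'endpoint_uuid': 'eth0'}},
    {'endpoint_id': {'device_id': 'R2', 'endpoint_uuid': 'eth1'}},
]
device_uuid = 'R2'  # device currently being processed
ingress = next(iter([
    ep_id
    for ep_id in link_endpoint_ids
    if ep_id['endpoint_id']['device_id'] != device_uuid
]), None)
assert ingress['endpoint_id']['endpoint_uuid'] == 'eth0'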
......@@ -16,14 +16,14 @@ from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
from common.tools.object_factory.Constraint import json_constraint_sla_capacity, json_constraint_sla_latency
from common.tools.object_factory.Context import json_context, json_context_id
from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id
from common.tools.object_factory.EndPoint import json_endpoints
from common.tools.object_factory.EndPoint import json_endpoint_descriptor, json_endpoints
from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
from common.tools.object_factory.Topology import json_topology, json_topology_id
def compose_device(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
endpoints = [json_endpoint_descriptor(endpoint_uuid, 'copper') for endpoint_uuid in endpoint_uuids]
endpoints = json_endpoints(device_id, endpoints, topology_id=topology_id)
device = json_device_emulated_packet_router_disabled(device_uuid, endpoints=endpoints)
return device_id, endpoints, device
......
......@@ -18,7 +18,7 @@ from common.tools.object_factory.Context import json_context, json_context_id
from common.tools.object_factory.Device import (
json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
json_device_emulated_packet_router_disabled, json_device_id)
from common.tools.object_factory.EndPoint import json_endpoints
from common.tools.object_factory.EndPoint import json_endpoint_descriptor, json_endpoints
from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
from common.tools.object_factory.Topology import json_topology, json_topology_id
......@@ -29,7 +29,7 @@ ADD_CONNECT_RULES_TO_DEVICES = False
def compose_router(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
r_endpoints = [json_endpoint_descriptor(endpoint_uuid, 'copper') for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
......@@ -38,7 +38,7 @@ def compose_router(device_uuid, endpoint_uuids, topology_id=None):
def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
r_endpoints = [json_endpoint_descriptor(endpoint_uuid, 'copper') for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
......
......@@ -18,8 +18,9 @@ from common.tools.object_factory.Constraint import json_constraint_sla_capacity,
from common.tools.object_factory.Context import json_context, json_context_id
from common.tools.object_factory.Device import (
json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
json_device_emulated_packet_router_disabled, json_device_emulated_tapi_disabled, json_device_id)
from common.tools.object_factory.EndPoint import json_endpoints
json_device_emulated_packet_router_disabled, json_device_emulated_tapi_disabled,
json_device_id)
from common.tools.object_factory.EndPoint import json_endpoint_descriptor, json_endpoints
from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
from common.tools.object_factory.Topology import json_topology, json_topology_id
......@@ -30,7 +31,7 @@ ADD_CONNECT_RULES_TO_DEVICES = False
def compose_router(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
r_endpoints = [json_endpoint_descriptor(endpoint_uuid, 'copper') for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
......@@ -39,7 +40,7 @@ def compose_router(device_uuid, endpoint_uuids, topology_id=None):
def compose_ols(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'optical', []) for endpoint_uuid in endpoint_uuids]
r_endpoints = [json_endpoint_descriptor(endpoint_uuid, 'optical') for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
......@@ -48,7 +49,7 @@ def compose_ols(device_uuid, endpoint_uuids, topology_id=None):
def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None):
device_id = json_device_id(device_uuid)
r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids]
r_endpoints = [json_endpoint_descriptor(endpoint_uuid, 'copper') for endpoint_uuid in endpoint_uuids]
config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else []
endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id)
j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints
......