Commits (44)
......@@ -31,11 +31,11 @@ include:
- local: '/src/dbscanserving/.gitlab-ci.yml'
- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
- local: '/src/opticalattackdetector/.gitlab-ci.yml'
# - local: '/src/opticalattackmanager/.gitlab-ci.yml'
#- local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/ztp/.gitlab-ci.yml'
- local: '/src/policy/.gitlab-ci.yml'
- local: '/src/forecaster/.gitlab-ci.yml'
- local: '/src/webui/.gitlab-ci.yml'
#- local: '/src/webui/.gitlab-ci.yml'
#- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
......
......@@ -364,7 +364,7 @@ for COMPONENT in $TFS_COMPONENTS; do
echo "Waiting for '$COMPONENT' component..."
COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
kubectl wait --namespace $TFS_K8S_NAMESPACE \
--for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
--for='condition=available' --timeout=90s deployment/${COMPONENT_OBJNAME}service
printf "\n"
done
......
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Deployment of the TFS Optical Controller gRPC server.
# NOTE(review): the image is taken from the local registry (localhost:32000)
# with imagePullPolicy Never, so it must be built/pushed locally before
# deploying — presumably a development-only manifest; confirm for production.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: opticalcontrollerservice
spec:
  selector:
    matchLabels:
      app: opticalcontrollerservice
  replicas: 1
  template:
    metadata:
      labels:
        app: opticalcontrollerservice
    spec:
      terminationGracePeriodSeconds: 5
      containers:
        - name: server
          image: localhost:32000/tfs/opticalcontroller:dev
          imagePullPolicy: Never
          ports:
            - containerPort: 10060  # gRPC API port
            - containerPort: 9192   # metrics port
          env:
            - name: LOG_LEVEL
              value: "INFO"
          # Health probes are commented out (no probe is active for this pod):
          #readinessProbe:
          #  exec:
          #    command: ["/bin/grpc_health_probe", "-addr=:10060"]
          #livenessProbe:
          #  exec:
          #    command: ["/bin/grpc_health_probe", "-addr=:10060"]
          resources:
            requests:
              cpu: 500m
              memory: 128Mi
            limits:
              cpu: 1000m
              memory: 1024Mi
---
# ClusterIP Service exposing the Optical Controller inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: opticalcontrollerservice
  labels:
    app: opticalcontrollerservice
spec:
  type: ClusterIP
  selector:
    app: opticalcontrollerservice
  ports:
    - name: grpc
      protocol: TCP
      port: 10060
      targetPort: 10060
    - name: metrics
      protocol: TCP
      port: 9192
      targetPort: 9192
......@@ -25,9 +25,12 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_gene
# Uncomment to activate Monitoring
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate bgpls_speaker
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
# Uncomment to activate Optical Controller
#export TFS_COMPONENTS="${TFS_COMPONENTS} opticalcontroller"
# Uncomment to activate ZTP
#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
......
src/tests/ofc24/
\ No newline at end of file
......@@ -74,6 +74,16 @@ service ContextService {
rpc SetConnection (Connection ) returns ( ConnectionId ) {}
rpc RemoveConnection (ConnectionId ) returns ( Empty ) {}
rpc GetConnectionEvents(Empty ) returns (stream ConnectionEvent ) {}
// ------------------------------ Experimental -----------------------------
rpc GetOpticalConfig (Empty ) returns (OpticalConfigList ) {}
rpc SetOpticalConfig (OpticalConfig ) returns (OpticalConfigId ) {}
rpc SelectOpticalConfig(OpticalConfigId) returns (OpticalConfig ) {}
rpc SetOpticalLink (OpticalLink ) returns (Empty ) {}
rpc GetOpticalLink (OpticalLinkId ) returns (OpticalLink ) {}
rpc GetFiber (FiberId ) returns (Fiber ) {}
}
// ----- Generic -------------------------------------------------------------------------------------------------------
......@@ -203,6 +213,7 @@ enum DeviceDriverEnum {
DEVICEDRIVER_GNMI_OPENCONFIG = 8;
DEVICEDRIVER_FLEXSCALE = 9;
DEVICEDRIVER_IETF_ACTN = 10;
DEVICEDRIVER_OC = 11;
}
enum DeviceOperationalStatusEnum {
......@@ -288,6 +299,7 @@ enum ServiceTypeEnum {
SERVICETYPE_TAPI_CONNECTIVITY_SERVICE = 3;
SERVICETYPE_TE = 4;
SERVICETYPE_E2E = 5;
SERVICETYPE_OPTICAL_CONNECTIVITY = 6;
}
enum ServiceStatusEnum {
......@@ -613,3 +625,53 @@ message AuthenticationResult {
ContextId context_id = 1;
bool authenticated = 2;
}
// ---------------- Experimental ------------------------
// Identifier of an OpticalConfig. NOTE(review): declared as a plain string,
// unlike the other ids below that wrap a Uuid message — confirm if intended.
message OpticalConfigId {
  string opticalconfig_uuid = 1;
}

// Opaque optical device configuration; 'config' carries a serialized
// configuration blob (presumably JSON — confirm against producers).
message OpticalConfig {
  OpticalConfigId opticalconfig_id = 1;
  string config = 2;
}

// Collection wrapper returned by ContextService.GetOpticalConfig().
message OpticalConfigList {
  repeated OpticalConfig opticalconfigs = 1;
}

// ---- Optical Link ----
message OpticalLinkId {
  Uuid optical_link_uuid = 1;
}

message FiberId {
  Uuid fiber_uuid = 1;
}

// A single fiber belonging to an optical link.
// NOTE(review): both 'ID' (string, field 10) and 'fiber_uuid' (FiberId,
// field 11) identify the fiber — consider consolidating. Field numbers are
// kept as-is for wire compatibility.
message Fiber {
  string ID = 10;
  string src_port = 1;
  string dst_port = 2;
  string local_peer_port = 3;
  string remote_peer_port = 4;
  repeated int32 c_slots = 5;  // slots in the C band (semantics inferred from name — confirm)
  repeated int32 l_slots = 6;  // slots in the L band
  repeated int32 s_slots = 7;  // slots in the S band
  float length = 8;
  bool used = 9;
  FiberId fiber_uuid = 11;
}

// Aggregate view of an optical link: endpoints plus its fibers.
message OpticalLinkDetails {
  float length = 1;
  string source = 2;
  string target = 3;
  repeated Fiber fibers = 4;
}

message OpticalLink {
  string name = 1;
  OpticalLinkDetails details = 2;
  OpticalLinkId optical_link_uuid = 3;
}
// Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package openconfig_device;

// OpticalConfig / OpticalConfigId / Empty come from context.proto.
import "context.proto";

// gRPC service for managing optical devices driven via OpenConfig.
service OpenConfigService {
  // Takes an OpticalConfig and returns its id (presumably registering the
  // device's configuration — confirm against the service implementation).
  rpc AddOpenConfigDevice (context.OpticalConfig) returns (context.OpticalConfigId) {}
  // Applies the given OpticalConfig to the corresponding device.
  rpc ConfigureOpticalDevice(context.OpticalConfig) returns (context.Empty          ) {}
}
#!/bin/bash
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run the Optical Attack Detector unit tests locally in Docker:
# build the images, start redis + dbscanserving on a private bridge network,
# run pytest with coverage inside the detector container, then clean up.
# No 'set -e' on purpose: the pre-test clean-up commands are expected to
# fail when the containers/network do not exist yet.

PROJECTDIR=$(pwd)   # $() instead of legacy backticks

RCFILE="$PROJECTDIR/coverage/.coveragerc"
COVERAGEFILE="$PROJECTDIR/coverage/.coverage"

# Destroy old coverage file and configure the correct folder on the .coveragerc file
rm -f "$COVERAGEFILE"
# sed reads the template directly (no useless 'cat | sed'); '+' is the
# delimiter because the replacement contains '/'.
sed "s+~/tfs-ctrl+$PROJECTDIR+g" "$PROJECTDIR/coverage/.coveragerc.template" > "$RCFILE"

echo
echo "Pre-test clean-up:"
echo "------------------"
docker rm -f redis opticalattackdetector dbscanserving
docker network rm tfs_br

echo
echo "Pull Docker images:"
echo "-------------------"
docker pull redis:7.0-alpine

echo
echo "Build optical attack detector:"
echo "------------------------------"
docker build -t "opticalattackdetector:latest" -f ./src/opticalattackdetector/Dockerfile .
docker images --filter="dangling=true" --quiet | xargs -r docker rmi

echo
echo "Build dbscan serving:"
echo "---------------------"
docker build -t "dbscanserving:latest" -f ./src/dbscanserving/Dockerfile .
docker images --filter="dangling=true" --quiet | xargs -r docker rmi

echo
echo "Create test environment:"
echo "------------------------"
export REDIS_PASSWORD=$(uuidgen)
docker network create -d bridge --subnet=172.254.254.0/24 --gateway=172.254.254.1 --ip-range=172.254.254.0/24 tfs_br
docker run --name redis -d --network=tfs_br -p 16379:6379 --rm \
    --env REDIS_PASSWORD="${REDIS_PASSWORD}" \
    redis:7.0-alpine redis-server --requirepass "${REDIS_PASSWORD}"
docker run --name dbscanserving -d --network=tfs_br -p 10008:10008 --rm \
    --env LOG_LEVEL=DEBUG \
    dbscanserving:latest "python -m dbscanserving.service"

echo
echo "Waiting for initialization..."
echo "-----------------------------"
# Poll the redis log until the server reports it is accepting connections.
while ! docker logs redis 2>&1 | grep -q 'Ready to accept connections'; do sleep 1; done
docker logs redis
#while ! docker logs dbscanserving 2>&1 | grep -q 'Server is ready'; do sleep 1; done
docker logs dbscanserving
#sleep 10
docker ps -a

echo
echo "Run unitary tests and analyze code coverage:"
echo "--------------------------------------------"
# Resolve the container IPs on the test bridge and inject them as the
# service-host env vars the detector expects.
export REDIS_ADDRESS=$(docker inspect redis --format "{{.NetworkSettings.Networks.tfs_br.IPAddress}}")
export DBSCANSERVING_ADDRESS=$(docker inspect dbscanserving --format "{{.NetworkSettings.Networks.tfs_br.IPAddress}}")
docker run --name opticalattackdetector -d --network=tfs_br -p 10006:10006 --rm \
    --env REDIS_PASSWORD="${REDIS_PASSWORD}" \
    --env DBSCANSERVINGSERVICE_SERVICE_HOST="${DBSCANSERVING_ADDRESS}" \
    --env CACHINGSERVICE_SERVICE_HOST="${REDIS_ADDRESS}" \
    opticalattackdetector:latest
sleep 5
docker ps -a
docker logs opticalattackdetector
docker exec -i opticalattackdetector bash -c "coverage run -m pytest --log-level=DEBUG --verbose opticalattackdetector/tests/test_unitary.py"

docker logs redis
docker logs dbscanserving
docker logs opticalattackdetector

echo
echo "Coverage report:"
echo "----------------"
docker exec -i opticalattackdetector bash -c "coverage report --include='opticalattackdetector/*' --sort cover --show-missing --skip-covered"

echo
echo "Post-test clean-up:"
echo "-------------------"
docker rm -f redis opticalattackdetector dbscanserving
docker network rm tfs_br

echo "Done!"
#!/bin/bash
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

########################################################################################################################
# Define your deployment settings here
########################################################################################################################

# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}

########################################################################################################################
# Automated steps start here
########################################################################################################################

# Dump the logs of the 'server' container of the Optical Controller deployment.
# Quoted to avoid word splitting if the namespace ever contains IFS characters.
kubectl --namespace "$TFS_K8S_NAMESPACE" logs deployment/opticalcontrollerservice -c server
......@@ -38,45 +38,45 @@ build bgpls_speaker:
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
# Apply unit test to the component
unit_test bgpls_speaker:
variables:
IMAGE_NAME: 'bgpls_speaker' # name of the microservice
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
stage: unit_test
needs:
- build bgpls_speaker
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 20030:20030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
- docker network rm teraflowbridge
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
## Apply unit test to the component
#unit_test bgpls_speaker:
# variables:
# IMAGE_NAME: 'bgpls_speaker' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: unit_test
# needs:
# - build bgpls_speaker
# before_script:
# - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
# - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
# script:
# - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
# - docker run --name $IMAGE_NAME -d -p 20030:20030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
# - sleep 5
# - docker ps -a
# - docker logs $IMAGE_NAME
# - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
# - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
# coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
# after_script:
# - docker rm -f $IMAGE_NAME
# - docker network rm teraflowbridge
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# - changes:
# - src/common/**/*.py
# - proto/*.proto
# - src/$IMAGE_NAME/**/*.{py,in,yml}
# - src/$IMAGE_NAME/Dockerfile
# - src/$IMAGE_NAME/tests/*.py
# - manifests/${IMAGE_NAME}service.yaml
# - .gitlab-ci.yml
# artifacts:
# when: always
# reports:
# junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
## Deployment of the service in Kubernetes Cluster
#deploy bgpls_speaker:
......
......@@ -14,7 +14,7 @@
import grpc, json, logging
from typing import List, Tuple, Union
from bgpls_speaker.service.tools.DiscoveredDBManager import DiscoveredDBManager, GetContextDevices
from bgpls_speaker.service.tools.DiscoveredDBManager import DiscoveredDBManager, GetContextDevices, getEndpointFromIpInterface
from bgpls_speaker.service.tools.GrpcServer import GrpcServer
from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
from common.proto.context_pb2 import DeviceId, Empty, EndPointId, Link, LinkId, Uuid
......@@ -25,19 +25,6 @@ from common.proto.bgpls_pb2 import (
)
from common.proto.bgpls_pb2_grpc import BgplsServiceServicer
def json_to_list(json_str : str) -> List[Union[str, Tuple[str, str]]]:
    """Convert a JSON string into a list of tagged tuples for display.

    Returns:
        [('kv', (key, value)), ...]  when the JSON decodes to an object
        [('item', 'a, b, c')]        when the JSON decodes to a list
        [('item', '<text>')]         for scalars or non-JSON input
    """
    try:
        data = json.loads(json_str)
    except Exception: # narrowed from bare 'except': don't swallow SystemExit/KeyboardInterrupt
        return [('item', str(json_str))]

    if isinstance(data, dict):
        return [('kv', (key, value)) for key, value in data.items()]
    elif isinstance(data, list):
        # str() each element: the original crashed with TypeError on lists
        # containing non-string items (e.g. '[1, 2]').
        return [('item', ', '.join(str(value) for value in data))]
    else:
        return [('item', str(data))]
LOGGER = logging.getLogger(__name__)
METRICS_POOL = MetricsPool('Service', 'RPC')
......@@ -121,12 +108,14 @@ class BgplsServiceServicerImpl(BgplsServiceServicer):
"""
When a node is added to context via bgpls module this function checks if there are other nodes in the
topology connected by links discovered via bgpls. Then, if the link exist adds it to the context.
TODO: get endpoints from pce module
"""
node_name=request.nodeName
node_igp=self.discoveredDB.GetIgpIdFromNodeName(node_name)
LOGGER.debug("(NotifyAddNodeToContext) Find links to nodes ")
nodes_conected=self.discoveredDB.FindConnectedNodes(node_igp)
LOGGER.debug("(NotifyAddNodeToContext) Find links to nodes %s:%s",node_name,node_igp)
# Get nodes connected and links where the igpID appears
nodes_conected, links_local, links_remote=self.discoveredDB.FindConnectedNodes(node_igp)
o=[LOGGER.debug("(NotifyAddNodeToContext) Links local: %s %s",link_local.local_id, link_local.remote_id) for link_local in links_local]
o=[LOGGER.debug("(NotifyAddNodeToContext) Links remote: %s %s",links_remote.local_id,links_remote.remote_id) for links_remote in links_remote]
# Check if nodes are in context
context_client=ContextClient()
context_client.connect()
......@@ -138,55 +127,52 @@ class BgplsServiceServicerImpl(BgplsServiceServicer):
LOGGER.debug("(NotifyAddNodeToContext) nodes_conected_in_context: %s", nodes_conected_in_context)
# TODO: next to function
for remote_node in nodes_conected_in_context:
# TODO: get endpoints connected to remote ip (pce¿)
end_point1="eth-1/0/20"
end_point2="eth-1/0/20"
end_point_uuid1=Uuid(uuid=end_point1)
end_point_uuid2=Uuid(uuid=end_point2)
link_name_src_dest=node_name+"/"+end_point1+"=="+remote_node+"/"+end_point2
LOGGER.info("(NotifyAddNodeToContext) creating link to...: %s", remote_node)
remote_igp=self.discoveredDB.GetIgpIdFromNodeName(remote_node)
# Get source device from name
device_uuid_src=DeviceId(device_uuid=Uuid(uuid=node_name))
device_src=context_client.GetDevice(device_uuid_src)
link_name_dest_src=remote_node+"/"+end_point2+"=="+node_name+"/"+end_point1
# Get destination device from name
device_uuid_dest=DeviceId(device_uuid=Uuid(uuid=remote_node))
device_dest=context_client.GetDevice(device_uuid_dest)
self.getEndpointFromIpInterface(device_src,link.local_ipv4_id)
self.getEndpointFromIpInterface(device_dest,link.remote_ipv4_id)
# LOGGER.debug("(NotifyAddNodeToContext) Source: %s Destination: %s", device_src,device_dest)
end_point_id1=EndPointId(endpoint_uuid=end_point_uuid1,device_id=device_uuid_src)
end_point_id2=EndPointId(endpoint_uuid=end_point_uuid2,device_id=device_uuid_dest)
end_point_ids_src_dest=[end_point_id1,end_point_id2]
end_point_ids_dest_src=[end_point_id2,end_point_id1]
link_id_src=context_client.SetLink(Link(link_id=LinkId(link_uuid=Uuid(uuid=link_name_src_dest)),
link_endpoint_ids=end_point_ids_src_dest))
link_id_dst=context_client.SetLink(Link(link_id=LinkId(link_uuid=Uuid(uuid=link_name_dest_src)),
link_endpoint_ids=end_point_ids_dest_src))
# Here I assume one link will always have same link in other direction
# First direction for link
# Get endpoints associated to link between devices
for link_local in links_local:
LOGGER.debug("(NotifyAddNodeToContext) local: %s %s", link_local.local_id,link_local.remote_id)
LOGGER.debug("(NotifyAddNodeToContext) matches: %s %s", node_igp,remote_igp)
if link_local.local_id == node_igp and link_local.remote_id == remote_igp:
LOGGER.debug("(NotifyAddNodeToContext) local_ipv4_id: %s", link_local.local_ipv4_id)
end_point1,ip_1=getEndpointFromIpInterface(device_src,link_local.local_ipv4_id)
LOGGER.debug("(NotifyAddNodeToContext) end_point1: %s", end_point1)
LOGGER.debug("(NotifyAddNodeToContext) remote_ipv4_id: %s", link_local.remote_ipv4_id)
end_point2,ip_2=getEndpointFromIpInterface(device_dest,link_local.remote_ipv4_id)
LOGGER.debug("(NotifyAddNodeToContext) end_point2: %s", end_point2)
# LOGGER.debug("(NotifyAddNodeToContext) Source: %s Destination: %s", end_point1,end_point2)
link_name_src_dest=node_name+"/"+end_point1+"=="+remote_node+"/"+end_point2
end_point_uuid1=Uuid(uuid=end_point1)
end_point_uuid2=Uuid(uuid=end_point2)
end_point_id1=EndPointId(endpoint_uuid=end_point_uuid1,device_id=device_uuid_src)
link_name_dest_src=remote_node+"/"+end_point2+"=="+node_name+"/"+end_point1
end_point_id2=EndPointId(endpoint_uuid=end_point_uuid2,device_id=device_uuid_dest)
end_point_ids_src_dest=[end_point_id1,end_point_id2]
end_point_ids_dest_src=[end_point_id2,end_point_id1]
link_id_src=context_client.SetLink(Link(link_id=LinkId(link_uuid=Uuid(uuid=link_name_src_dest)),
link_endpoint_ids=end_point_ids_src_dest))
link_id_dst=context_client.SetLink(Link(link_id=LinkId(link_uuid=Uuid(uuid=link_name_dest_src)),
link_endpoint_ids=end_point_ids_dest_src))
LOGGER.debug("(NotifyAddNodeToContext) Link set id src--->dst: %s", link_id_src)
context_client.close()
return Empty()
def getEndpointFromIpInterface(self,device,ipv4):
"""
Get TFS endpoint from interface IPv4.
"""
for config in device.device_config.config_rules:
if config.WhichOneof('config_rule') == 'custom':
for item_type, item in json_to_list(config.custom.resource_value):
if item_type == 'kv':
# LOGGER.debug("(getEndpointFromIpInterface) item: %s",item)
endpoint=item
LOGGER.debug("(getEndpointFromIpInterface) config: %s",config.custom.resource_key)
if "/interface" in config.custom.resource_key:
interface=config.custom.resource_key.split("/interface")[1].strip("[]")
LOGGER.debug("(getEndpointFromIpInterface) interface: %s",interface)
if ipv4 in config.custom.resource_value:
LOGGER.debug("(getEndpointFromIpInterface) value: %s",config.custom.resource_value)
return endpoint
\ No newline at end of file
......@@ -97,16 +97,16 @@ public class grpcClient {
strIgpL=link.getLocalNodeIGPId().toString();
}
String ipv4R;
if(link.getiPv4RouterIDNeighborNodeLATLV()==null)
if(link.getiPv4RouterIDLocalNodeLATLV()==null)
ipv4R="-";
else {
ipv4R=link.getiPv4RouterIDNeighborNodeLATLV();
ipv4R=link.getiPv4RouterIDLocalNodeLATLV();
}
String ipv4L;
if(link.getiPv4RouterIDLocalNodeLATLV()==null)
if(link.getiPv4RouterIDNeighborNodeLATLV()==null)
ipv4L="-";
else {
ipv4L=link.getiPv4RouterIDLocalNodeLATLV();
ipv4L=link.getiPv4RouterIDNeighborNodeLATLV();
}
// Build link for grpc message. need non null values in some cases
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from typing import List, Tuple, Union
from bgpls_speaker.service.tools.Tools import UpdateRequest,NodeInfo,LinkInfo
from common.proto.bgpls_pb2 import NodeDescriptors
from common.proto.context_pb2 import ContextId, ContextList,Topology,TopologyId,Device,DeviceDriverEnum,ContextId,Empty, TopologyList
......@@ -20,9 +20,22 @@ from common.Constants import DEFAULT_CONTEXT_NAME
from common.tools.object_factory.Context import json_context_id
from context.client.ContextClient import ContextClient
import logging
import logging,json
LOGGER = logging.getLogger(__name__)
def json_to_list(json_str : str) -> List[Union[str, Tuple[str, str]]]:
    """Decode a JSON string into tagged tuples suitable for display.

    Objects become [('kv', (key, value)), ...]; lists become a single
    ('item', 'a, b') tuple; anything else (including undecodable input)
    becomes a single ('item', text) tuple.
    """
    try:
        parsed = json.loads(json_str)
    except: # pylint: disable=bare-except
        # Not valid JSON: surface the raw text as one item.
        return [('item', str(json_str))]
    if isinstance(parsed, dict):
        return [('kv', entry) for entry in parsed.items()]
    if isinstance(parsed, list):
        return [('item', ', '.join(parsed))]
    return [('item', str(parsed))]
class DiscoveredDBManager:
def __init__(self):
self.discoveredDB=[]
......@@ -37,15 +50,16 @@ class DiscoveredDBManager:
# Check if node info message
if(self.checkIfNodeInUpdate(update_request)):
# Check if node exists
to_add=True
node_count=len(update_request.nodes)
for node in update_request.nodes:
if(self.CheckIfNodeNameInDb(node) or CheckIfNodeInContext(node.node_name)):
# Replace info from node if exists
LOGGER.debug("(AddToDB) Node already in DB!!!")
to_add=False
update_request.nodes.remove(node)
node_count=node_count-1
else:
LOGGER.debug("(AddToDB) Node NOT in DB!!!")
if(to_add):
if(node_count>0):
self.discoveredDB.append(update_request)
else:
# is a link
......@@ -127,7 +141,9 @@ class DiscoveredDBManager:
Return the IGP ID given a node name if exists in the discoveredDB.
"""
for update in self.discoveredDB:
LOGGER.debug("(GetIgpIdFromNodeName)checking update: %s",update.toString())
for node in update.nodes:
LOGGER.debug("(GetIgpIdFromNodeName)checking nodes: %s",node.node_name)
if(node.node_name==name):
return node.igp_id
return None
......@@ -165,6 +181,8 @@ class DiscoveredDBManager:
# find links where the node appears
links_to_node=[]
nodes_conected=[]
link_local=[]
link_remote=[]
for update in self.discoveredDB:
for link in update.links:
LOGGER.debug("(FindConnectedNodes) link in up:%s %s",
......@@ -173,14 +191,16 @@ class DiscoveredDBManager:
if(link.local_id == new_node):
links_to_node.append(link)
nodes_conected.append(link.remote.node_name)
link_local.append(link)
if(link.remote_id == new_node):
links_to_node.append(link)
nodes_conected.append(link.local.node_name)
link_remote.append(link)
if(nodes_conected):
LOGGER.debug("(FindConnectedNodes) links to local node:%s",new_node)
LOGGER.debug("(FindConnectedNodes) %s", nodes_conected)
return nodes_conected
return nodes_conected, link_local, link_remote
LOGGER.debug("(FindConnectedNodes) NO LINKS TO OTHER NODES")
return None
......@@ -263,3 +283,36 @@ def CheckIfNodeInContext(node_name) -> bool:
return True
LOGGER.info("(CheckIfNodeInContext) Node NOT in context")
return False
def getEndpointFromIpInterface(device,ipv4):
    """
    Get TFS endpoint uuid from given device having the IPv4 interface.

    Scans the device's custom config rules for an '/interface[...]' rule whose
    value mentions `ipv4`, then looks up the '/endpoints/endpoint[...]' rule
    matching that interface name and returns its endpoint uuid.

    Returns:
        (endpoint_uuid, address_ip) on a match, otherwise (None, ipv4).

    NOTE(review): assumes resource_value is JSON with 'name'/'address_ip'
    keys for interfaces and a 'uuid' key for endpoints — confirm against the
    device drivers that populate these rules.
    """
    for config in device.device_config.config_rules:
        if config.WhichOneof('config_rule') == 'custom':
            # for item_type, item in json_to_list(config.custom.resource_value):
            #     if item_type == 'kv':
            #         # LOGGER.debug("(getEndpointFromIpInterface) item: %s",item)
            #         endpoint_item=item
            #         LOGGER.debug("(getEndpointFromIpInterface) config: %s",config.custom.resource_key)
            if "/interface" in config.custom.resource_key:
                # Extract the interface name from keys like '/interface[ethX]'.
                iface=config.custom.resource_key.split("/interface")[1].strip("[]")
                LOGGER.debug("(getEndpointFromIpInterface) interface: %s",iface)
                # Substring check: the target IPv4 appears somewhere in the rule value.
                if ipv4 in config.custom.resource_value:
                    LOGGER.debug("(getEndpointFromIpInterface) value: %s",config.custom.resource_value)
                    resource_dict=json.loads(config.custom.resource_value)
                    interface = resource_dict['name']
                    resource_ip=resource_dict['address_ip']
                    # Search for endpoint uuid assigned to interface
                    # NOTE(review): the inner loop reuses (shadows) the outer
                    # 'config' variable; after it runs, the outer loop resumes
                    # from an exhausted iterator state for this variable name.
                    # Works because we return on match, but worth renaming.
                    for config in device.device_config.config_rules:
                        if config.WhichOneof('config_rule') == 'custom':
                            if "/endpoints/endpoint" in config.custom.resource_key:
                                key=config.custom.resource_key.split("/endpoints/endpoint")[1].strip("[]")
                                LOGGER.debug("(getEndpointFromIpInterface) key: %s",key)
                                # Substring match between interface name and endpoint key.
                                if interface in key:
                                    LOGGER.debug("(getEndpointFromIpInterface) value: %s",config.custom.resource_value)
                                    endpoint=config.custom.resource_key.split("/endpoints/endpoint")[1].strip("[]")
                                    resource_dict_endpoint=json.loads(config.custom.resource_value)
                                    return resource_dict_endpoint['uuid'],resource_ip
    # No interface rule matched the requested IPv4: fall back to (None, ipv4).
    return None,ipv4
\ No newline at end of file
......@@ -47,12 +47,13 @@ class UpdateRequest:
)
def toString(self):
# Debug purposes
out = ""
out = " "
out+=self.address_family_id
out+=self.next_hop
out+=self.as_path_segment
for node in self.nodes:
out+="name"
out+=node.node_name
out+=node.igp_id
out+=str(node.bgpls_id)
......@@ -84,7 +85,15 @@ class NodeInfo:
self.bgpls_id = bgpls_id.strip("/")
self.as_id = as_id
self.learnt_from=learnt_from
def toString(self):
# Debug purposes
out = "name"
out+=self.node_name
out+=self.igp_id
out+=str(self.bgpls_id)
out+=str(self.as_id)
out+=self.learnt_from
@classmethod
def from_proto(cls, proto_node):
return cls(
......
......@@ -59,6 +59,7 @@ class ServiceNameEnum(Enum):
TE = 'te'
FORECASTER = 'forecaster'
E2EORCHESTRATOR = 'e2eorchestrator'
OPTICALCONTROLLER = 'opticalcontroller'
BGPLS = 'bgpls-speaker'
# Used for test and debugging only
......@@ -87,6 +88,7 @@ DEFAULT_SERVICE_GRPC_PORTS = {
ServiceNameEnum.TE .value : 10030,
ServiceNameEnum.FORECASTER .value : 10040,
ServiceNameEnum.E2EORCHESTRATOR .value : 10050,
ServiceNameEnum.OPTICALCONTROLLER .value : 10060,
ServiceNameEnum.BGPLS .value : 20030,
# Used for test and debugging only
......
......@@ -15,6 +15,7 @@
import copy, json
from typing import Dict, List, Optional, Tuple, Union
from common.DeviceTypes import DeviceTypeEnum
from common.proto.context_pb2 import DeviceDriverEnum
def get_descriptors_add_contexts(contexts : List[Dict]) -> List[Dict]:
contexts_add = copy.deepcopy(contexts)
......@@ -95,7 +96,8 @@ def split_devices_by_rules(devices : List[Dict]) -> Tuple[List[Dict], List[Dict]
if len(connect_rules) > 0:
device_add = copy.deepcopy(device)
device_add['device_endpoints'] = []
if (device['device_drivers'][0] != DeviceDriverEnum.DEVICEDRIVER_OC):
device_add['device_endpoints'] = []
device_add['device_config'] = {'config_rules': connect_rules}
devices_add.append(device_add)
......
......@@ -26,7 +26,9 @@ from common.proto.context_pb2 import (
Link, LinkEvent, LinkId, LinkIdList, LinkList,
Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList,
OpticalConfig, OpticalConfigId, OpticalConfigList
)
from common.proto.context_pb2_grpc import ContextServiceStub
from common.proto.context_policy_pb2_grpc import ContextPolicyServiceStub
from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
......@@ -436,3 +438,26 @@ class ContextClient:
response = self.policy_stub.RemovePolicyRule(request)
LOGGER.debug('RemovePolicyRule result: {:s}'.format(grpc_message_to_json_string(response)))
return response
#//////////////// Experimental //////////////////
    @RETRY_DECORATOR
    def SetOpticalConfig(self, request : OpticalConfig) -> OpticalConfigId:
        """Store/update an OpticalConfig via the Context gRPC stub; returns its id."""
        LOGGER.debug('SetOpticalConfig request: {:s}'.format(grpc_message_to_json_string(request)))
        response = self.stub.SetOpticalConfig(request)
        LOGGER.debug('SetOpticalConfig result: {:s}'.format(grpc_message_to_json_string(response)))
        return response
    @RETRY_DECORATOR
    def GetOpticalConfig(self, request : Empty) -> OpticalConfigList:
        """Retrieve all OpticalConfigs from the Context service."""
        LOGGER.debug('GetOpticalConfig request: {:s}'.format(grpc_message_to_json_string(request)))
        response = self.stub.GetOpticalConfig(request)
        LOGGER.debug('GetOpticalConfig result: {:s}'.format(grpc_message_to_json_string(response)))
        return response
@RETRY_DECORATOR
def SelectOpticalConfig(self,request : OpticalConfigId) -> OpticalConfigList:
LOGGER.debug('SelectOpticalConfig request: {:s}'.format(grpc_message_to_json_string(request)))
response = self.stub.SelectOpticalConfig(request)
LOGGER.debug('SelectOpticalConfig result: {:s}'.format(grpc_message_to_json_string(response)))
return response
......@@ -23,7 +23,9 @@ from common.proto.context_pb2 import (
Link, LinkEvent, LinkId, LinkIdList, LinkList,
Service, ServiceEvent, ServiceFilter, ServiceId, ServiceIdList, ServiceList,
Slice, SliceEvent, SliceFilter, SliceId, SliceIdList, SliceList,
Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
Topology, TopologyDetails, TopologyEvent, TopologyId, TopologyIdList, TopologyList,
OpticalConfigList, OpticalConfigId, OpticalConfig
)
from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
from common.proto.context_pb2_grpc import ContextServiceServicer
from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer
......@@ -43,6 +45,7 @@ from .database.Slice import (
slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_select, slice_set, slice_unset)
from .database.Topology import (
topology_delete, topology_get, topology_get_details, topology_list_ids, topology_list_objs, topology_set)
from .database.OpticalConfig import set_opticalconfig, select_opticalconfig, get_opticalconfig
LOGGER = logging.getLogger(__name__)
......@@ -296,3 +299,22 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty:
return policyrule_delete(self.db_engine, self.messagebroker, request)
# ---------------------------- Experimental -------------------
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
def GetOpticalConfig(self, request : Empty, context : grpc.ServicerContext) -> OpticalConfigList:
result = get_opticalconfig(self.db_engine)
return OpticalConfigList(OpticalConfigs=result)
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
def SetOpticalConfig(self, request : OpticalConfig, context : grpc.ServicerContext) -> OpticalConfigId:
result = set_opticalconfig(self.db_engine, request)
return OpticalConfigId(**result)
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
def SelectOpticalConfig(self, request : OpticalConfigId, context : grpc.ServicerContext) -> OpticalConfig:
result = select_opticalconfig(self.db_engine, request)
optical_config_id = OpticalConfigId()
optical_config_id.CopyFrom(result.OpticalConfig_id)
return OpticalConfig(config=result.config, OpticalConfig_id=optical_config_id)
......@@ -21,7 +21,9 @@ from typing import Dict, List, Optional, Set, Tuple
from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
from common.message_broker.MessageBroker import MessageBroker
from common.proto.context_pb2 import (
Device, DeviceFilter, DeviceId, DeviceIdList, DeviceList, Empty, EventTypeEnum, TopologyId)
Device, DeviceDriverEnum, DeviceFilter, DeviceId, DeviceIdList, DeviceList,
Empty, EventTypeEnum, TopologyId
)
from common.tools.grpc.Tools import grpc_message_to_json_string
from common.tools.object_factory.Device import json_device_id
from context.service.database.uuids.Topology import topology_get_uuid
......@@ -103,10 +105,12 @@ def device_set(db_engine : Engine, messagebroker : MessageBroker, request : Devi
})
topology_uuids.add(topology_uuid)
is_oc_driver = DeviceDriverEnum.DEVICEDRIVER_OC in set(request.device_drivers)
endpoints_data : List[Dict] = list()
for i, endpoint in enumerate(request.device_endpoints):
endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid
if len(endpoint_device_uuid) == 0 or is_oc_driver : endpoint_device_uuid = device_uuid
if endpoint_device_uuid not in {raw_device_uuid, device_uuid}:
raise InvalidArgumentException(
'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid,
......@@ -302,4 +306,4 @@ def device_select(db_engine : Engine, request : DeviceFilter) -> DeviceList:
obj_list : List[DeviceModel] = query.filter(DeviceModel.device_uuid.in_(device_uuids)).all()
return [obj.dump(**dump_params) for obj in obj_list]
devices = run_transaction(sessionmaker(bind=db_engine), callback)
return DeviceList(devices=devices)
\ No newline at end of file
return DeviceList(devices=devices)
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, logging
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy_cockroachdb import run_transaction
from common.proto.context_pb2 import OpticalConfig, OpticalConfigId
from .models.OpticalConfigModel import OpticalConfigModel
LOGGER = logging.getLogger(__name__)
def get_opticalconfig(db_engine : Engine):
    """Load every OpticalConfigModel row and convert each into an OpticalConfig proto.

    :param db_engine: SQLAlchemy engine bound to the context database.
    :return: list of OpticalConfig messages with `config` (JSON string) and uuid populated.
    """
    def callback(session : Session):
        optical_configs = list()
        results = session.query(OpticalConfigModel).all()
        for obj in results:
            optical_config = OpticalConfig()
            # fix: json.dump() serializes to a file object and requires two arguments;
            # json.dumps() returns the JSON string we want to store in the proto field.
            optical_config.config = json.dumps(obj.config)
            optical_config.opticalconfig_id.opticalconfig_uuid = obj.opticalconfig_uuid
            optical_configs.append(optical_config)
        return optical_configs
    return run_transaction(sessionmaker(bind=db_engine), callback)
def set_opticalconfig(db_engine : Engine, request : OpticalConfig):
    """Upsert an optical configuration row built from `request.config` (a JSON string).

    :param db_engine: SQLAlchemy engine bound to the context database.
    :param request: OpticalConfig proto; `request.config` holds the JSON payload and
        `request.opticalconfig_id.opticalconfig_uuid` the row key.
    :return: dict with key 'opticalconfig_uuid', suitable for OpticalConfigId(**result).
    """
    requested_uuid = request.opticalconfig_id.opticalconfig_uuid
    my_config_data = []
    if request.config:
        channels = []
        transceivers = []
        config = json.loads(request.config)
        if 'channels' in config and len(config['channels']) > 0:
            channels = [channel['name']['index'] for channel in config['channels']]
        if 'transceivers' in config and len(config['transceivers']['transceiver']) > 0:
            transceivers = [transceiver for transceiver in config['transceivers']['transceiver']]
        my_config_data = [
            {
                "opticalconfig_uuid": requested_uuid,
                "channels"          : channels,
                # NOTE: "transcievers" spelling matches the OpticalConfigModel column name.
                "transcievers"      : transceivers,
                "interfaces"        : json.dumps(config["interfaces"]["interface"]),
                "channel_namespace" : config["channel_namespace"],
                "endpoints"         : [json.dumps(endpoint) for endpoint in config["endpoints"]],
                "frequency"         : config["frequency"] if "frequency" in config else 0,
                "operational_mode"  : config["operational_mode"] if "operational_mode" in config else 0,
                "output_power"      : config["output_power"] if "output_power" in config else '',
            }
        ]

    # fix: an empty values() list would make the INSERT fail; nothing to store.
    if len(my_config_data) == 0:
        return {'opticalconfig_uuid': requested_uuid}

    def callback(session : Session):
        stmt = insert(OpticalConfigModel).values(my_config_data)
        stmt = stmt.on_conflict_do_update(
            index_elements=[OpticalConfigModel.opticalconfig_uuid],
            set_=dict(
                channel_namespace=stmt.excluded.channel_namespace
            )
        )
        stmt = stmt.returning(OpticalConfigModel.opticalconfig_uuid)
        row = session.execute(stmt).fetchone()
        # fix: the original callback never returned, so run_transaction always yielded
        # None and the caller received {'opticalconfig_uuid': None}.
        return None if row is None else row[0]

    opticalconfig_uuid = run_transaction(sessionmaker(bind=db_engine), callback)
    if opticalconfig_uuid is None:
        opticalconfig_uuid = requested_uuid
    return {'opticalconfig_uuid': opticalconfig_uuid}
def select_opticalconfig(db_engine : Engine, request : OpticalConfigId):
    """Fetch the optical configuration matching `request.opticalconfig_uuid`.

    Returns an OpticalConfig proto; its fields remain unset when no row matches.
    """
    def callback(session : Session) -> OpticalConfig:
        optical_config = OpticalConfig()
        row = session.query(OpticalConfigModel).filter_by(
            opticalconfig_uuid=request.opticalconfig_uuid).first()
        if row is not None:
            optical_config.config = json.dumps(row.dump())
            optical_config.opticalconfig_id.opticalconfig_uuid = row.opticalconfig_uuid
        return optical_config
    return run_transaction(sessionmaker(bind=db_engine, expire_on_commit=False), callback)