Commits (24)
@@ -171,5 +171,8 @@ local_k8s_deployment.sh
# asdf configuration
.tool-versions
# libyang build files
libyang/
# Other logs
**/logs/*.log.*
@@ -18,6 +18,11 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enables redeployment of all components. Set to "YES" to activate.
export REDEPLOYALL=${REDEPLOYALL:-""}
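# Example (hypothetical invocation): wipe and redeploy every component in one shot.
# REDEPLOYALL is honored by the per-component redeploy checks in deploy/crdb.sh,
# deploy/nats.sh and deploy/qdb.sh shown further below.
#   export REDEPLOYALL="YES"
#   ./deploy/all.sh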
# ----- TeraFlowSDN ------------------------------------------------------------
@@ -102,6 +107,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
# If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single-node mode. It is convenient for
#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode: an entire NATS cluster with
#   3 replicas (by default) will be deployed. It is convenient for production environments and
#   provides scalability features.
export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
# If not already set, disable flag for re-deploying NATS from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
......
@@ -18,6 +18,11 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enables redeployment of all components. Set to "YES" to activate.
export REDEPLOYALL=${REDEPLOYALL:-""}
# If not already set, set the namespace where CockroachDB will be deployed.
export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
@@ -223,7 +228,7 @@ function crdb_deploy_cluster() {
kubectl create namespace ${CRDB_NAMESPACE}
echo
echo "CockroachDB"
echo "CockroachDB (cluster-mode)"
echo ">>> Checking if CockroachDB is deployed..."
if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
echo ">>> CockroachDB is present; skipping step."
@@ -360,7 +365,7 @@ function crdb_drop_database_cluster() {
}
if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
crdb_undeploy_single
fi
@@ -370,7 +375,7 @@ if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
crdb_drop_database_single
fi
elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
crdb_undeploy_cluster
fi
......
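Either the component-specific CRDB_REDEPLOY or the new global REDEPLOYALL now triggers the undeploy path, in both single and cluster modes. A hypothetical invocation, assuming the script sits at ./deploy/crdb.sh alongside ./deploy/nats.sh; note that redeploying drops the existing database contents:

    export CRDB_DEPLOY_MODE="cluster"
    export REDEPLOYALL="YES"
    ./deploy/crdb.sh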
@@ -18,6 +18,10 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enables redeployment of all components. Set to "YES" to activate.
export REDEPLOYALL=${REDEPLOYALL:-""}
# If not already set, set the namespace where NATS will be deployed.
export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
@@ -27,16 +31,31 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
# If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single-node mode. It is convenient for
#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode: an entire NATS cluster with
#   3 replicas (by default) will be deployed. It is convenient for production environments and
#   provides scalability features.
export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
# If not already set, disable flag for re-deploying NATS from scratch.
# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
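# Example (hypothetical invocation): deploy a 3-replica NATS cluster from scratch.
#   export NATS_DEPLOY_MODE="cluster"
#   export NATS_REDEPLOY="YES"
#   ./deploy/nats.sh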
########################################################################################################################
# Automated steps start here
########################################################################################################################
# Constants
TMP_FOLDER="./tmp"
NATS_MANIFESTS_PATH="manifests/nats"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
function nats_deploy_single() {
echo "NATS Namespace"
echo ">>> Create NATS Namespace (if missing)"
@@ -47,18 +66,86 @@ function nats_deploy_single() {
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
echo
echo "Install NATS (single-node)"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
echo ">>> NATS is present; skipping step."
else
echo ">>> Deploy NATS"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true
echo ">>> Waiting NATS statefulset to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
printf "%c" "."
sleep 1
done
        # Waiting on the statefulset condition "Available=True" does not work.
        # Waiting on condition "jsonpath='{.status.readyReplicas}'=3" throws:
        #   "error: readyReplicas is not found"
        # Workaround: check that the pods themselves are ready.
#echo ">>> NATS statefulset created. Waiting for readiness condition..."
#kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats
#kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
# statefulset/nats
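        # A possible alternative (untested sketch): "kubectl rollout status" understands
        # StatefulSets and blocks until all replicas are ready, e.g.:
        #   kubectl rollout status --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} --timeout=300s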
echo ">>> NATS statefulset created. Waiting NATS pods to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do
printf "%c" "."
sleep 1
done
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
fi
echo
echo "NATS Port Mapping"
echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}'
CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
echo
echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})"
NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}')
PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}'
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}'
CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
echo
}
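# Note on the port mapping above: MicroK8s' nginx ingress exposes raw TCP ports through the
# nginx-ingress-tcp-microk8s-conf ConfigMap (entries of the form "<ext-port>: <namespace>/<service>:<port>")
# plus a hostPort added to the controller DaemonSet. An illustrative way to inspect the result:
#   kubectl get configmap nginx-ingress-tcp-microk8s-conf --namespace ingress -o yaml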
function nats_deploy_cluster() {
echo "NATS Namespace"
echo ">>> Create NATS Namespace (if missing)"
kubectl create namespace ${NATS_NAMESPACE}
echo
echo "Add NATS Helm Chart"
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
echo
echo "Upgrade NATS Helm Chart"
helm3 repo update nats
echo
echo "Install NATS (single-node)"
echo "Install NATS (cluster-mode)"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
echo ">>> NATS is present; skipping step."
else
echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
+        cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
echo ">>> Waiting NATS statefulset to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
printf "%c" "."
@@ -78,7 +165,17 @@ function nats_deploy_single() {
printf "%c" "."
sleep 1
done
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do
printf "%c" "."
sleep 1
done
while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do
printf "%c" "."
sleep 1
done
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1
kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2
fi
echo
@@ -110,7 +207,7 @@ function nats_deploy_single() {
echo
}
-function nats_undeploy_single() {
+function nats_undeploy() {
echo "NATS"
echo ">>> Checking if NATS is deployed..."
if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
@@ -127,8 +224,14 @@ function nats_undeploy_single() {
echo
}
if [ "$NATS_REDEPLOY" == "YES" ]; then
nats_undeploy_single
if [ "$NATS_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
nats_undeploy
fi
-nats_deploy_single
+if [ "$NATS_DEPLOY_MODE" == "single" ]; then
+    nats_deploy_single
+elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then
+    nats_deploy_cluster
+else
+    echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE"
+fi
\ No newline at end of file
@@ -18,6 +18,10 @@
# Read deployment settings
########################################################################################################################
# ----- Redeploy All ------------------------------------------------------------
# If not already set, enables redeployment of all components. Set to "YES" to activate.
export REDEPLOYALL=${REDEPLOYALL:-""}
# If not already set, set the namespace where QuestDB will be deployed.
export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
@@ -177,7 +181,7 @@ function qdb_drop_tables() {
echo
}
if [ "$QDB_REDEPLOY" == "YES" ]; then
if [ "$QDB_REDEPLOY" == "YES" ] || [ "$REDEPLOYALL" == "YES" ]; then
qdb_undeploy
fi
......
@@ -32,8 +32,11 @@ sudo apt-get --yes --quiet --quiet update
sudo apt-get --yes --quiet --quiet install build-essential cmake libpcre2-dev python3-dev python3-cffi
mkdir libyang
git clone https://github.com/CESNET/libyang.git libyang
cd libyang      # git fetch/checkout must run inside the cloned repository
git fetch
git checkout v2.1.148
cd ..
mkdir libyang/build
cd libyang/build
echo "*" > .gitignore
cmake -D CMAKE_BUILD_TYPE:String="Release" ..
make
sudo make install
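# Optional sanity check (illustrative; assumes the default /usr/local install prefix):
#   sudo ldconfig
#   pkg-config --modversion libyang   # expected: 2.1.148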
......
@@ -39,8 +39,8 @@ spec:
cpu: 8
memory: 8Gi
tlsEnabled: true
  # You can set either a version of the db or a specific image name
  # cockroachDBVersion: v22.2.8
image:
name: cockroachdb/cockroach:v22.2.8
# nodes refers to the number of crdb pods that are created
@@ -49,21 +49,16 @@ spec:
additionalLabels:
crdb: is-cool
  # affinity is a new API field that is behind a feature gate that is
  # disabled by default. To enable please see the operator.yaml file.
# The affinity field will accept any podSpec affinity rule.
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - cockroachdb
# topologyKey: kubernetes.io/hostname
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/instance: cockroachdb
# nodeSelectors used to match against
# nodeSelector:
......
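The topologySpreadConstraints block above asks the scheduler to keep the per-node count of cockroachdb pods within maxSkew: 1 of each other; whenUnsatisfiable: ScheduleAnyway makes this a soft preference rather than a hard placement rule. It relies on the TopologySpreadRules feature gate enabled in the operator in the next hunk. An illustrative way to verify the resulting spread:

    kubectl get pods --namespace crdb -o wide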
@@ -381,6 +381,7 @@ spec:
spec:
containers:
- args:
- -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true
- -zap-log-level
- info
env:
......
@@ -70,11 +70,30 @@ spec:
selector:
app: deviceservice
ports:
  - name: grpc
    protocol: TCP
    port: 2020
    targetPort: 2020
  - name: metrics
    protocol: TCP
    port: 9192
    targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: deviceservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: deviceservice
minReplicas: 1
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
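The HorizontalPodAutoscaler above scales the deviceservice Deployment between 1 and 10 replicas to keep average CPU utilization around 80%. Note that Utilization targets are computed against the container's CPU request, so the Deployment must declare one for the HPA to act. An illustrative way to watch it react (names as defined above):

    kubectl get hpa deviceservice-hpa --watch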
@@ -65,11 +65,30 @@ spec:
selector:
app: monitoringservice
ports:
  - name: grpc
    protocol: TCP
    port: 7070
    targetPort: 7070
  - name: metrics
    protocol: TCP
    port: 9192
    targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: monitoringservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: monitoringservice
minReplicas: 1
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
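The values file below is the manifests/nats/cluster.yaml consumed by nats_deploy_cluster in deploy/nats.sh above: it pins the NATS image tag, caps the Go runtime heap via GOMEMLIMIT, enables a 3-replica cluster with file-backed JetStream, and spreads the pods across nodes.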
container:
image:
    tag: 2.9-alpine
env:
# different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB
# should be ~90% of memory limit
GOMEMLIMIT: 400MiB
merge:
# recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters
resources:
requests:
cpu: 1
memory: 500Mi
limits:
cpu: 1
memory: 1Gi
config:
cluster:
enabled: true
replicas: 3
jetstream:
enabled: true
fileStore:
pvc:
size: 4Gi
# Force one pod per node, if possible
podTemplate:
topologySpreadConstraints:
kubernetes.io/hostname:
maxSkew: 1
whenUnsatisfiable: ScheduleAnyway
\ No newline at end of file
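As the comment in the values file notes, GOMEMLIMIT takes Go runtime units (MiB, GiB, ...) rather than Kubernetes quantities; it is a soft limit that makes the Go garbage collector stay below the stated figure, leaving headroom under the 1Gi container limit before the kernel OOM-killer would intervene. A sketch of the install step that consumes this file (mirroring nats.sh above, with the default "nats" namespace):

    helm3 install nats nats/nats --namespace nats -f manifests/nats/cluster.yaml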
@@ -18,6 +18,11 @@ metadata:
name: tfs-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/limit-rps: '5'
nginx.ingress.kubernetes.io/limit-connections: '10'
nginx.ingress.kubernetes.io/proxy-connect-timeout: '10'
nginx.ingress.kubernetes.io/proxy-send-timeout: '10'
nginx.ingress.kubernetes.io/proxy-read-timeout: '10'
spec:
rules:
- http:
@@ -43,7 +48,7 @@ spec:
name: nbiservice
port:
number: 8080
-      - path: /()(debug-api/.*)
+      - path: /()(tfs-api/.*)
pathType: Prefix
backend:
service:
......
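The annotations added above cap each client IP at 5 requests per second and 10 concurrent connections, and bound the proxy connect/send/read timeouts at 10 seconds; once the per-second limit (plus nginx's default burst allowance) is exceeded, the ingress answers with HTTP 503. An illustrative probe (replace <tfs-host> with the ingress address; path taken from the rule above):

    for i in $(seq 1 20); do curl -s -o /dev/null -w '%{http_code}\n' http://<tfs-host>/tfs-api/; done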
@@ -111,9 +111,31 @@ spec:
selector:
app: webuiservice
ports:
  - name: webui
    port: 8004
    targetPort: 8004
  - name: grafana
    port: 3000
    targetPort: 3000
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: webuiservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: webuiservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 50
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
@@ -13,6 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ----- Redeploy All ------------------------------------------------------------
# Redeployment of all components; disabled by default, set to "YES" to force it.
export REDEPLOYALL=""
# ----- TeraFlowSDN ------------------------------------------------------------
@@ -116,6 +121,10 @@ export NATS_EXT_PORT_CLIENT="4222"
# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP="8222"
# Set NATS installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/nats.sh for additional details
export NATS_DEPLOY_MODE="single"
# Disable flag for re-deploying NATS from scratch.
export NATS_REDEPLOY=""
......
@@ -211,7 +211,7 @@ enum DeviceDriverEnum {
DEVICEDRIVER_XR = 6;
DEVICEDRIVER_IETF_L2VPN = 7;
DEVICEDRIVER_GNMI_OPENCONFIG = 8;
-  DEVICEDRIVER_FLEXSCALE = 9;
+  DEVICEDRIVER_OPTICAL_TFS = 9;
DEVICEDRIVER_IETF_ACTN = 10;
DEVICEDRIVER_OC = 11;
}
......
@@ -22,4 +22,4 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-    nbi/tests/test_debug_api.py
+    nbi/tests/test_tfs_api.py
@@ -46,7 +46,7 @@ def validate_device_driver_enum(message):
'DEVICEDRIVER_XR',
'DEVICEDRIVER_IETF_L2VPN',
'DEVICEDRIVER_GNMI_OPENCONFIG',
-        'DEVICEDRIVER_FLEXSCALE',
+        'DEVICEDRIVER_OPTICAL_TFS',
'DEVICEDRIVER_IETF_ACTN',
]
......
@@ -31,7 +31,7 @@ class ORM_DeviceDriverEnum(enum.Enum):
XR = DeviceDriverEnum.DEVICEDRIVER_XR
IETF_L2VPN = DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN
GNMI_OPENCONFIG = DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG
-    FLEXSCALE = DeviceDriverEnum.DEVICEDRIVER_FLEXSCALE
+    OPTICAL_TFS = DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS
IETF_ACTN = DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN
OC = DeviceDriverEnum.DEVICEDRIVER_OC
......
@@ -156,12 +156,12 @@ if LOAD_ALL_DEVICE_DRIVERS:
]))
if LOAD_ALL_DEVICE_DRIVERS:
-    from .flexscale.FlexScaleDriver import FlexScaleDriver # pylint: disable=wrong-import-position
+    from .optical_tfs.OpticalTfsDriver import OpticalTfsDriver # pylint: disable=wrong-import-position
DRIVERS.append(
-        (FlexScaleDriver, [
+        (OpticalTfsDriver, [
{
FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM,
-                FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_FLEXSCALE,
+                FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS,
}
]))
......
@@ -20,7 +20,7 @@ from common.tools.object_factory.EndPoint import json_endpoint_id
from common.type_checkers.Checkers import chk_string, chk_type
from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES
from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology
-from device.service.drivers.ietf_l2vpn.TfsDebugApiClient import TfsDebugApiClient
+from device.service.drivers.ietf_l2vpn.TfsApiClient import TfsApiClient
from .Tools import connection_point, wim_mapping
from .WimconnectorIETFL2VPN import WimconnectorIETFL2VPN
@@ -56,7 +56,7 @@ class IetfL2VpnDriver(_Driver):
wim_account = {'user': username, 'password': password}
# Mapping updated dynamically with each request
config = {'mapping_not_needed': False, 'service_endpoint_mapping': []}
-        self.dac = TfsDebugApiClient(self.address, int(self.port), scheme=scheme, username=username, password=password)
+        self.tac = TfsApiClient(self.address, int(self.port), scheme=scheme, username=username, password=password)
self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config)
self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors
@@ -101,8 +101,8 @@ class IetfL2VpnDriver(_Driver):
try:
chk_string(str_resource_name, resource_key, allow_empty=False)
if resource_key == RESOURCE_ENDPOINTS:
-                # return endpoints through debug-api and list-devices method
-                results.extend(self.dac.get_devices_endpoints(self.__import_topology))
+                # return endpoints through TFS NBI API and list-devices method
+                results.extend(self.tac.get_devices_endpoints(self.__import_topology))
elif resource_key == RESOURCE_SERVICES:
# return all services through
reply = self.wim.get_all_active_connectivity_services()
......