Commit eab105e4 authored by Lluis Gifre Renom's avatar Lluis Gifre Renom
Merge branch 'develop' of https://gitlab.com/teraflow-h2020/controller into feat/ecoc22-dc-interconnect-disjoint
parents 00f47211 21101edf
Part of 2 merge requests: !54 (Release 2.0.0), !4 (Compute component)
Showing 421 additions and 42 deletions
@@ -155,3 +155,6 @@ cython_debug/
# Sqlite
*.db
# TeraFlowSDN-generated files
tfs_runtime_env_vars.sh
deploy.sh 0 → 100755
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Read deployment settings
########################################################################################################################
# If not already set, set the URL of your local Docker registry where the images will be uploaded to.
# Leave it blank if you do not want to use any Docker registry.
export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""}
#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/"
# If not already set, set the list of components you want to build images for, and deploy.
# By default, only basic components are deployed.
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"}
# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
# If not already set, set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
# If not already set, set the new Grafana admin password.
export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
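# Example: these settings can also be overridden from the calling shell before
# running this script, e.g.:
#   export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
#   export TFS_K8S_NAMESPACE="tfs-dev"
#   ./deploy.sh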
########################################################################################################################
# Automated steps start here
########################################################################################################################
# Constants
GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
TMP_FOLDER="./tmp"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
mkdir -p $TMP_LOGS_FOLDER
echo "Deleting and Creating a new namespace..."
kubectl delete namespace $TFS_K8S_NAMESPACE
kubectl create namespace $TFS_K8S_NAMESPACE
printf "\n"
if [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
echo "Creating secrets for InfluxDB..."
#TODO: make sure to change this when having a production deployment
kubectl create secret generic influxdb-secrets --namespace=$TFS_K8S_NAMESPACE \
--from-literal=INFLUXDB_DB="monitoring" --from-literal=INFLUXDB_ADMIN_USER="teraflow" \
--from-literal=INFLUXDB_ADMIN_PASSWORD="teraflow" --from-literal=INFLUXDB_HTTP_AUTH_ENABLED="True"
kubectl create secret generic monitoring-secrets --namespace=$TFS_K8S_NAMESPACE \
--from-literal=INFLUXDB_DATABASE="monitoring" --from-literal=INFLUXDB_USER="teraflow" \
--from-literal=INFLUXDB_PASSWORD="teraflow" --from-literal=INFLUXDB_HOSTNAME="localhost"
printf "\n"
fi
echo "Deploying components and collecting environment variables..."
ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
PYTHONPATH=$(pwd)/src
echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
for COMPONENT in $TFS_COMPONENTS; do
    echo "Processing '$COMPONENT' component..."
    IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG"
    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
    echo " Building Docker image..."
    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
    else
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
    fi
    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
        echo "Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
        TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
        docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
        PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
        docker push "$IMAGE_URL" > "$PUSH_LOG"
    fi
    echo " Adapting '$COMPONENT' manifest file..."
    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
        # Registry is set
        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
    else
        # Registry is not set
        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
    fi
    echo " Deploying '$COMPONENT' component to Kubernetes..."
    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
    echo " Collecting env-vars for '$COMPONENT' component..."
    SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json)
    if [ -z "${SERVICE_DATA}" ]; then continue; fi
    # Env vars for service's host address
    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
    if [ -z "${SERVICE_HOST}" ]; then continue; fi
    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
    # Env vars for service's 'grpc' port (if any)
    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
    if [ -n "${SERVICE_PORT_GRPC}" ]; then
        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
    fi
    # Env vars for service's 'http' port (if any)
    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
    if [ -n "${SERVICE_PORT_HTTP}" ]; then
        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
    fi
    printf "\n"
done
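# Example of the lines appended to tfs_runtime_env_vars.sh by the loop above
# (the ClusterIP and port values are illustrative):
#   export CONTEXTSERVICE_SERVICE_HOST=10.152.183.10
#   export CONTEXTSERVICE_SERVICE_PORT_GRPC=1010
#   export CONTEXTSERVICE_SERVICE_PORT_HTTP=8080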
echo "Deploying extra manifests..."
for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
echo "Processing manifest '$EXTRA_MANIFEST'..."
kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
printf "\n"
done
# For now, leave this control here. Some component dependencies are not well handled.
for COMPONENT in $TFS_COMPONENTS; do
echo "Waiting for '$COMPONENT' component..."
kubectl wait --namespace $TFS_K8S_NAMESPACE \
--for='condition=available' --timeout=300s deployment/${COMPONENT}service
printf "\n"
done
if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
echo "Configuring WebUI DataStores and Dashboards..."
sleep 3
INFLUXDB_HOST="monitoringservice"
INFLUXDB_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service/monitoringservice -o jsonpath='{.spec.ports[?(@.name=="influxdb")].port}')
INFLUXDB_URL="http://${INFLUXDB_HOST}:${INFLUXDB_PORT}"
INFLUXDB_USER=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_USER}' | base64 --decode)
INFLUXDB_PASSWORD=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_PASSWORD}' | base64 --decode)
INFLUXDB_DATABASE=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_DB}' | base64 --decode)
# Exposed through the ingress controller "tfs-ingress"
GRAFANA_HOSTNAME="127.0.0.1"
GRAFANA_PORT="80"
GRAFANA_BASEURL="/grafana"
# Default Grafana credentials
GRAFANA_USERNAME="admin"
GRAFANA_PASSWORD="admin"
# Default Grafana API URL
GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
# Updated Grafana API URL
GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
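    # Example of the resulting URLs with the default values above
    # (TFS_GRAFANA_PASSWORD defaults to "admin123+"):
    #   GRAFANA_URL_DEFAULT = http://admin:admin@127.0.0.1:80/grafana
    #   GRAFANA_URL_UPDATED = http://admin:admin123+@127.0.0.1:80/grafana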
echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
# Configure Grafana Admin Password
# Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
curl -X PUT -H "Content-Type: application/json" -d '{
"oldPassword": "'${GRAFANA_PASSWORD}'",
"newPassword": "'${TFS_GRAFANA_PASSWORD}'",
"confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
}' ${GRAFANA_URL_DEFAULT}/api/user/password
echo
# Create InfluxDB DataSource
# Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
curl -X POST -H "Content-Type: application/json" -d '{
"type" : "influxdb",
"name" : "InfluxDB",
"url" : "'"$INFLUXDB_URL"'",
"access" : "proxy",
"basicAuth": false,
"user" : "'"$INFLUXDB_USER"'",
"password" : "'"$INFLUXDB_PASSWORD"'",
"isDefault": true,
"database" : "'"$INFLUXDB_DATABASE"'"
}' ${GRAFANA_URL_UPDATED}/api/datasources
echo
# Create Monitoring Dashboard
# Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
curl -X POST -H "Content-Type: application/json" \
-d '@src/webui/grafana_dashboard.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tf-l3-monit"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
printf "\n\n"
fi
./show_deploy.sh
echo "Done!"
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
# If not already set, set the list of components you want to build images for, and deploy.
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation policy service compute monitoring dbscanserving opticalattackmitigator opticalcentralizedattackdetector webui"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
echo "Exposing GRPC ports for components..."
for COMPONENT in $TFS_COMPONENTS; do
echo "Processing '$COMPONENT' component..."
SERVICE_GRPC_PORT=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.name=="grpc")].port}')
if [ -z "${SERVICE_GRPC_PORT}" ]; then
printf "\n"
continue;
fi
PATCH='{"data": {"'${SERVICE_GRPC_PORT}'": "'$TFS_K8S_NAMESPACE'/'${COMPONENT}service':'${SERVICE_GRPC_PORT}'"}}'
#echo "PATCH: ${PATCH}"
kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
PORT_MAP='{"containerPort": '${SERVICE_GRPC_PORT}', "hostPort": '${SERVICE_GRPC_PORT}'}'
CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
#echo "PATCH: ${PATCH}"
kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
printf "\n"
done
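# Example of the patches built above for a hypothetical 'context' component whose
# gRPC port is 1010, using the default namespace "tfs-dev":
#   configmap patch: {"data": {"1010": "tfs-dev/contextservice:1010"}}
#   daemonset patch: {"spec": {"template": {"spec": {"containers": [
#     {"name": "nginx-ingress-microk8s", "ports": [{"containerPort": 1010, "hostPort": 1010}]}]}}}}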
echo "Done!"
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If not already set, set the list of components you want to install dependencies for.
# By default, dependencies for all components are installed.
# Components still not supported by this script:
# automation & policy : implemented in Java
# dlt : under design
# pathcomp : under design
ALL_COMPONENTS="context device service compute monitoring webui interdomain slice"
ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalcentralizedattackdetector"
ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
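# Example: dependencies can be installed for a subset of components by overriding
# TFS_COMPONENTS before running this script (the file name install_requirements.sh
# used below is an assumption):
#   TFS_COMPONENTS="context device" ./install_requirements.sh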
echo "Updating PIP, SetupTools and Wheel..."
pip install --upgrade pip # ensure next packages get the latest versions
pip install --upgrade setuptools wheel # bring basic tooling for other requirements
pip install --upgrade pip-tools pylint # bring tooling for package compilation and code linting
printf "\n"
echo "Creating integrated requirements file..."
touch requirements.in
diff requirements.in common_requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
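# Note on the pipeline above: 'diff A B' prints lines that exist only in B prefixed
# with '> '; grep keeps those lines and sed strips the prefix, so only requirements
# not already present in requirements.in get appended. For an illustrative package
# 'grpcio' listed only in common_requirements.in, diff emits '> grpcio' and the
# plain line 'grpcio' ends up appended to requirements.in.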
printf "\n"
echo "Collecting requirements from components..."
for COMPONENT in $TFS_COMPONENTS
do
if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then continue; fi
diff requirements.in src/$COMPONENT/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
done
printf "\n"
echo "Compiling requirements..."
# Done in a single step to prevent breaking dependencies between components
pip-compile --quiet --output-file=requirements.txt requirements.in
printf "\n"
echo "Installing requirements..."
python -m pip install -r requirements.txt
printf "\n"
#echo "Removing the temporary files..."
rm requirements.in
rm requirements.txt
printf "\n"
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tfs-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  rules:
  - http:
      paths:
      - path: /webui(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: webuiservice
            port:
              number: 8004
      - path: /grafana(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: webuiservice
            port:
              number: 3000
      - path: /context(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: contextservice
            port:
              number: 8080
      - path: /()(restconf/.*)
        pathType: Prefix
        backend:
          service:
            name: computeservice
            port:
              number: 8080
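# Note: with rewrite-target: /$2, a request such as /webui/home is forwarded to
# webuiservice as /home (the second capture group of the matching path), while the
# /()(restconf/.*) rule keeps the restconf/... path intact for computeservice.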
@@ -67,6 +67,11 @@ spec:
        - containerPort: 3000
          name: http-grafana
          protocol: TCP
        env:
        - name: GF_SERVER_ROOT_URL
          value: "http://0.0.0.0:3000/grafana/"
        - name: GF_SERVER_SERVE_FROM_SUB_PATH
          value: "true"
        readinessProbe:
          failureThreshold: 3
          httpGet:
@@ -102,6 +107,9 @@ spec:
  selector:
    app: webuiservice
  ports:
  - name: http → - name: webui
    port: 8004
    targetPort: 8004
  - name: grafana
    port: 3000
    targetPort: 3000
# Set the URL of your local Docker registry where the images will be uploaded to.
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for and deploy.
# Supported components are:
# context device automation policy service compute monitoring webui
# interdomain slice pathcomp dlt
# dbscanserving opticalattackmitigator opticalcentralizedattackdetector
# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
export TFS_COMPONENTS="context device automation service compute monitoring webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
# Set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE="tfs"
# Set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
# Set the new Grafana admin password
export TFS_GRAFANA_PASSWORD="admin123+"
@@ -136,7 +136,7 @@ done
if [[ "$COMPONENTS" == *"webui"* ]]; then
    echo "Configuring WebUI DataStores and Dashboards..."
    ./configure_dashboards.sh → ./configure_dashboards_in_kubernetes.sh
    printf "\n\n"
fi
...
@@ -16,7 +16,7 @@
PROJECTDIR=`pwd`
cd $(dirname $0)/src → cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
echo
...
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=service.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    compute/tests/test_unitary.py
@@ -26,9 +26,5 @@ export REDIS_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status
export REDIS_SERVICE_PORT=$(kubectl get service redis-tests --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}')
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    context/tests/test_unitary.py
@@ -21,9 +21,6 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    device/tests/test_unitary_emulated.py
...
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    device/tests/test_unitary_emulated.py
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    device/tests/test_unitary_microwave.py
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    device/tests/test_unitary_openconfig.py
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    device/tests/test_unitary_p4.py
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    device/tests/test_unitary_tapi.py
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=service.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    service/tests/test_unitary.py
@@ -20,9 +20,5 @@ cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
# Useful flags for pytest:
#-o log_cli=true -o log_file=service.log -o log_file_level=DEBUG
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
    slice/tests/test_unitary.py