Commit d4955a06 authored by Lluis Gifre Renom

MicroK8s deployment scripts:

- moved old deployment scripts to scripts/old
- added NGINX-based ingress controller to expose http-based endpoints from a single connection point
- adapted webui K8s manifest to be used with the ingress controller
- improved install_requirements.sh (renamed from install_development_dependencies.sh) to prevent collision of package versions
- adapted script to show deployment
- created functional example settings script my_deploy.sh
- created improved deploy.sh script
- added first version of the deployment tutorial. Run Experiments and Development are work in progress.
parent c7ce67d9
Merge request !54: Release 2.0.0
Showing 899 additions and 2 deletions
@@ -155,3 +155,6 @@ cython_debug/
# Sqlite
*.db
# TeraFlowSDN-generated files
tfs_runtime_env_vars.sh
deploy.sh 0 → 100755
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the URL of your local Docker registry where the images will be uploaded to. Leave it blank if
# you do not want to use any Docker registry.
export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""}
#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/"
# If not already set, set the list of components you want to build images for, and deploy.
export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation policy service compute monitoring dbscanserving opticalattackmitigator opticalcentralizedattackdetector webui"}
# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
# If not already set, set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
# If not already set, set the new Grafana admin password
export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
# Constants
GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
TMP_FOLDER="./tmp"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
mkdir -p $TMP_LOGS_FOLDER
echo "Deleting and Creating a new namespace..."
kubectl delete namespace $TFS_K8S_NAMESPACE
kubectl create namespace $TFS_K8S_NAMESPACE
printf "\n"
if [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
echo "Creating secrets for InfluxDB..."
#TODO: make sure to change this when having a production deployment
kubectl create secret generic influxdb-secrets --namespace=$TFS_K8S_NAMESPACE \
--from-literal=INFLUXDB_DB="monitoring" --from-literal=INFLUXDB_ADMIN_USER="teraflow" \
--from-literal=INFLUXDB_ADMIN_PASSWORD="teraflow" --from-literal=INFLUXDB_HTTP_AUTH_ENABLED="True"
kubectl create secret generic monitoring-secrets --namespace=$TFS_K8S_NAMESPACE \
--from-literal=INFLUXDB_DATABASE="monitoring" --from-literal=INFLUXDB_USER="teraflow" \
--from-literal=INFLUXDB_PASSWORD="teraflow" --from-literal=INFLUXDB_HOSTNAME="localhost"
printf "\n"
fi
echo "Deploying components and collecting environment variables..."
ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
PYTHONPATH=$(pwd)/src
echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
for COMPONENT in $TFS_COMPONENTS; do
    echo "Processing '$COMPONENT' component..."
    IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG"
    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')

    echo " Building Docker image..."
    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
    else
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG"
    fi

    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
        echo "Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
        TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
        docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
        PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
        docker push "$IMAGE_URL" > "$PUSH_LOG"
    fi

    echo " Adapting '$COMPONENT' manifest file..."
    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
        # Registry is set
        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
    else
        # Registry is not set
        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
    fi

    echo " Deploying '$COMPONENT' component to Kubernetes..."
    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT}service >> "$DEPLOY_LOG"
    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT}service >> "$DEPLOY_LOG"

    echo " Collecting env-vars for '$COMPONENT' component..."
    SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json)
    if [ -z "${SERVICE_DATA}" ]; then continue; fi

    # Env vars for service's host address
    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
    if [ -z "${SERVICE_HOST}" ]; then continue; fi
    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT

    # Env vars for service's 'grpc' port (if any)
    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
    if [ -n "${SERVICE_PORT_GRPC}" ]; then
        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
    fi

    # Env vars for service's 'http' port (if any)
    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
    if [ -n "${SERVICE_PORT_HTTP}" ]; then
        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
    fi

    printf "\n"
done
echo "Deploying extra manifests..."
for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
    echo "Processing manifest '$EXTRA_MANIFEST'..."
    kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
    printf "\n"
done
# For now, leave this control here. Some component dependencies are not well handled
for COMPONENT in $TFS_COMPONENTS; do
    echo "Waiting for '$COMPONENT' component..."
    kubectl wait --namespace $TFS_K8S_NAMESPACE \
        --for='condition=available' --timeout=300s deployment/${COMPONENT}service
    printf "\n"
done
if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
echo "Configuring WebUI DataStores and Dashboards..."
INFLUXDB_HOST="monitoringservice"
INFLUXDB_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service/monitoringservice -o jsonpath='{.spec.ports[?(@.name=="influxdb")].port}')
INFLUXDB_URL="http://${INFLUXDB_HOST}:${INFLUXDB_PORT}"
INFLUXDB_USER=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_USER}' | base64 --decode)
INFLUXDB_PASSWORD=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_ADMIN_PASSWORD}' | base64 --decode)
INFLUXDB_DATABASE=$(kubectl --namespace $TFS_K8S_NAMESPACE get secrets influxdb-secrets -o jsonpath='{.data.INFLUXDB_DB}' | base64 --decode)
# Exposed through the ingress controller "tfs-ingress"
GRAFANA_HOSTNAME="127.0.0.1"
GRAFANA_PORT="80"
GRAFANA_BASEURL="/grafana"
# Default Grafana credentials
GRAFANA_USERNAME="admin"
GRAFANA_PASSWORD="admin"
# Default Grafana API URL
GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
# Updated Grafana API URL
GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_HOSTNAME}:${GRAFANA_PORT}${GRAFANA_BASEURL}"
echo "Connecting to grafana at URL: ${GRAFANA_URL_DEFAULT}..."
# Configure Grafana Admin Password
# Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
curl -X PUT -H "Content-Type: application/json" -d '{
"oldPassword": "'${GRAFANA_PASSWORD}'",
"newPassword": "'${TFS_GRAFANA_PASSWORD}'",
"confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
}' ${GRAFANA_URL_DEFAULT}/api/user/password
echo
# Create InfluxDB DataSource
# Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
curl -X POST -H "Content-Type: application/json" -d '{
"type" : "influxdb",
"name" : "InfluxDB",
"url" : "'"$INFLUXDB_URL"'",
"access" : "proxy",
"basicAuth": false,
"user" : "'"$INFLUXDB_USER"'",
"password" : "'"$INFLUXDB_PASSWORD"'",
"isDefault": true,
"database" : "'"$INFLUXDB_DATABASE"'"
}' ${GRAFANA_URL_UPDATED}/api/datasources
echo
# Create Monitoring Dashboard
# Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
curl -X POST -H "Content-Type: application/json" \
-d '@src/webui/grafana_dashboard.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tf-l3-monit"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
printf "\n\n"
fi
./show_deploy.sh
echo "Done!"
@@ -13,26 +13,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Updating PIP, SetupTools and Wheel..."
pip install --upgrade pip # ensure next packages get the latest versions
pip install --upgrade setuptools wheel # bring basic tooling for other requirements
pip install --upgrade pip-tools pylint # bring tooling for package compilation and code linting
printf "\n"
# installing basic tools
pip install --upgrade pip setuptools wheel pip-tools pylint pytest pytest-benchmark coverage grpcio-tools
echo "Creating integrated requirements file..."
tee requirements.in >/dev/null <<EOF
grpcio-tools==1.43.0
EOF
printf "\n"
# creating an empty file
echo "" > requirements.in
#TODO: include here your component
# List of all the components, not only deployed ones
# TODO: include here your components
COMPONENTS="compute context device service monitoring opticalcentralizedattackdetector opticalattackmitigator dbscanserving webui"
# compiling dependencies from all components
for component in $COMPONENTS
echo "Collecting requirements from components..."
for COMPONENT in $COMPONENTS
do
echo "computing requirements for component $component"
diff requirements.in src/$component/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
diff requirements.in src/$COMPONENT/requirements.in | grep '^>' | sed 's/^>\ //' >> requirements.in
done
printf "\n"
echo "Compiling requirements..."
# Done in a single step to prevent breaking dependencies between components
pip-compile --quiet --output-file=requirements.txt requirements.in
printf "\n"
pip-compile --output-file=requirements.txt requirements.in
echo "Installing requirements..."
python -m pip install -r requirements.txt
printf "\n"
# removing the temporary files
#echo "Removing the temporary files..."
rm requirements.in
rm requirements.txt
printf "\n"
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tfs-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  rules:
  - http:
      paths:
      - path: /webui(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: webuiservice
            port:
              number: 8004
      - path: /grafana(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: webuiservice
            port:
              number: 3000
      - path: /context(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: contextservice
            port:
              number: 8080
      - path: /()(restconf/.*)
        pathType: Prefix
        backend:
          service:
            name: computeservice
            port:
              number: 8080
@@ -39,6 +39,8 @@ spec:
        env:
        - name: LOG_LEVEL
          value: "DEBUG"
        - name: WEBUI_APPLICATION_ROOT
          value: /webui
        readinessProbe:
          httpGet:
            path: /healthz/ready
@@ -65,6 +67,11 @@ spec:
        - containerPort: 3000
          name: http-grafana
          protocol: TCP
        env:
        - name: GF_SERVER_ROOT_URL
          value: "http://0.0.0.0:3000/grafana/"
        - name: GF_SERVER_SERVE_FROM_SUB_PATH
          value: "true"
        readinessProbe:
          failureThreshold: 3
          httpGet:
@@ -100,6 +107,9 @@ spec:
  selector:
    app: webuiservice
  ports:
  - name: http
  - name: webui
    port: 8004
    targetPort: 8004
  - name: grafana
    port: 3000
    targetPort: 3000
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
export TFS_COMPONENTS="context device automation service compute monitoring webui"
export TFS_IMAGE_TAG="dev"
export TFS_K8S_NAMESPACE="tfs"
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
export TFS_GRAFANA_PASSWORD="admin123+"
@@ -134,7 +134,7 @@ done
if [[ "$COMPONENTS" == *"webui"* ]]; then
    echo "Configuring WebUI DataStores and Dashboards..."
    ./configure_dashboards.sh
    ./configure_dashboards_in_kubernetes.sh
    printf "\n\n"
fi
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
echo "Deployment Resources:"
kubectl --namespace $TFS_K8S_NAMESPACE get all
printf "\n"
echo "Deployment Ingress:"
kubectl --namespace $TFS_K8S_NAMESPACE get ingress
printf "\n"
# 1. Deployment Guide
This section walks you through the process of deploying TeraFlowSDN on top of an Oracle VirtualBox-based VM running
the MicroK8s Kubernetes platform. The guide includes details on configuring and installing the VM, installing and
configuring MicroK8s, and deploying and reporting the status of the TeraFlowSDN controller.
## Table of Contents:
- [1.1. Create VM for the TeraFlowSDN controller](./1-1-create-vm.md)
- [1.2. Install MicroK8s Kubernetes platform](./1-2-install-microk8s.md)
- [1.3. Deploy TeraFlowSDN over MicroK8s](./1-3-deploy-tfs.md)
- [1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md)
# 1.1. Create VM for the TeraFlowSDN controller
In this section, we install a VM to be used as the deployment, execution, and development environment for the ETSI
TeraFlowSDN controller. If you already have a remote physical server fitting the requirements specified in this
section, feel free to use it instead of deploying a local VM. Other virtualization environments can also be used; in
that case, you will need to adapt these instructions to your particular case.
## 1.1.1. Create a NAT Network in VirtualBox
In "Oracle VM VirtualBox Manager", Menu "File > Preferences... > Network", create a NAT network with the following
specifications:
|Name |CIDR |DHCP |IPv6 |
|-----------|-----------|--------|--------|
|TFS-NAT-Net|10.0.2.0/24|Disabled|Disabled|
Within the newly created "TFS-NAT-Net" NAT network, configure the following IPv4 forwarding rules:
|Name|Protocol|Host IP |Host Port|Guest IP |Guest Port|
|----|--------|---------|---------|---------|----------|
|SSH |TCP |127.0.0.1|2200 |10.0.2.10|22 |
|HTTP|TCP |127.0.0.1|8080 |10.0.2.10|80 |
__Note__: IP address 10.0.2.10 is the one that will be assigned to the VM.
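If you prefer the command line over the VirtualBox GUI, the same NAT network and forwarding rules can be created with
`VBoxManage`. This is a minimal sketch assuming a standard VirtualBox installation; adapt it to your environment.
```bash
# Create the NAT network with DHCP disabled (IPv6 is disabled by default)
VBoxManage natnetwork add --netname TFS-NAT-Net --network "10.0.2.0/24" --dhcp off

# Add the IPv4 port-forwarding rules
VBoxManage natnetwork modify --netname TFS-NAT-Net \
    --port-forward-4 "SSH:tcp:[127.0.0.1]:2200:[10.0.2.10]:22"
VBoxManage natnetwork modify --netname TFS-NAT-Net \
    --port-forward-4 "HTTP:tcp:[127.0.0.1]:8080:[10.0.2.10]:80"
```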
## 1.1.2. Create VM in VirtualBox
In "Oracle VM VirtualBox Manager", create a new VM with the following specifications:
- Name: TFS-VM
- Type/Version: Linux / Ubuntu (64-bit)
- CPU (*): 4 vCPUs @ 100% execution capacity
- RAM: 8 GB
- Disk: 40 GB, Virtual Disk Image (VDI), Dynamically allocated
- Optical Drive ISO Image: "ubuntu-20.04.4-live-server-amd64.iso"
(from [Ubuntu Server 20.04 LTS](https://releases.ubuntu.com/20.04/))
- Network Adapter 1 (*): enabled, attached to NAT Network "TFS-NAT-Net"
- Minor adjustments (*):
  - Audio: disabled
  - Boot order: disable "Floppy"

__Note__: (*) settings to be edited after the VM is created.
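Alternatively, the VM can be created from the command line. The following `VBoxManage` sketch is roughly equivalent to
the settings above; the disk path, storage controller name, and ISO location are illustrative assumptions.
```bash
# Create and configure the VM (adjust paths to your environment)
VBoxManage createvm --name TFS-VM --ostype Ubuntu_64 --register
VBoxManage modifyvm TFS-VM --cpus 4 --cpuexecutioncap 100 --memory 8192 \
    --nic1 natnetwork --nat-network1 TFS-NAT-Net --audio none \
    --boot1 dvd --boot2 disk --boot3 none --boot4 none

# Create and attach the 40 GB dynamically allocated disk and the installer ISO
VBoxManage createmedium disk --filename ~/TFS-VM.vdi --size 40960 --format VDI
VBoxManage storagectl TFS-VM --name "SATA" --add sata --controller IntelAhci
VBoxManage storageattach TFS-VM --storagectl "SATA" --port 0 --device 0 --type hdd --medium ~/TFS-VM.vdi
VBoxManage storageattach TFS-VM --storagectl "SATA" --port 1 --device 0 --type dvddrive \
    --medium ~/ubuntu-20.04.4-live-server-amd64.iso
```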
## 1.1.3. Install Ubuntu 20.04 LTS Operating System
In "Oracle VM VirtualBox Manager", start the VM in normal mode, and follow the installation procedure. Below we provide
some installation guidelines:
- Installation Language: English
- Autodetect your keyboard
- Configure static network specifications (a netplan equivalent is sketched after this list):
|Interface|IPv4 Method|Subnet |Address |Gateway |Name servers |Search domains|
|---------|-----------|-----------|---------|--------|---------------|--------------|
|enp0s3 |Manual |10.0.2.0/24|10.0.2.10|10.0.2.1|8.8.8.8,8.8.4.4|<empty> |
- Leave proxy and mirror addresses as they are
- Update the installer (if needed). At the time of writing this walkthrough, version 22.06.1 is the newest one.
- Use an entire disk for the installation
- Disable setup of the disk as LVM group
- Double check that NO swap space is allocated in the partition table. Kubernetes does not work properly with SWAP.
- Configure your user and system names:
  - User name: TeraFlowSDN
  - Server's name: tfs-vm
  - Username: tfs
  - Password: tfs123
- Install the OpenSSH server
- Import SSH keys, if any.
- Featured Server Snaps
  - Do not install any featured server snaps. They will be installed manually later to illustrate how to uninstall and
    reinstall them in case of trouble.
- Let the system install and upgrade the packages.
  - This operation might take some minutes, depending on how old the ISO image is and on your Internet connection
    speed.
- Restart the VM when the installation is completed.
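For reference, the static network settings entered in the installer roughly correspond to the netplan configuration
sketched below (the file name and exact layout generated by the installer may differ). You can also verify that no swap
is active once the system is installed.
```bash
# Sketch of the resulting netplan configuration (file name is an assumption)
cat /etc/netplan/00-installer-config.yaml
# network:
#   version: 2
#   ethernets:
#     enp0s3:
#       addresses: [10.0.2.10/24]
#       gateway4: 10.0.2.1
#       nameservers:
#         addresses: [8.8.8.8, 8.8.4.4]

# Verify that no swap is configured (the command should print nothing)
swapon --show
```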
## 1.1.4. Upgrade the Ubuntu distribution
```bash
sudo apt-get update -y
sudo apt-get dist-upgrade -y
```
## 1.1.5. Install VirtualBox Guest Additions
On VirtualBox Manager, open the VM main screen. If you are running the VM in headless mode, right-click on the VM in
the VirtualBox Manager window and click "Show". If a dialog informing you about how to leave the interface of the VM is
shown, confirm by pressing the "Switch" button. The interface of the VM should appear.
Click menu "Device > Insert Guest Additions CD image..."
On the VM terminal, type:
```bash
sudo apt-get install -y linux-headers-$(uname -r) build-essential dkms
# This command might take some minutes depending on your VM specs and your Internet access speed.
sudo mount /dev/cdrom /mnt/
cd /mnt/
sudo ./VBoxLinuxAdditions.run
# This command might take some minutes depending on your VM specs.
sudo reboot
```
# 1.2. Install MicroK8s Kubernetes platform
This section describes how to deploy the MicroK8s Kubernetes platform and configure it to be used with the ETSI
TeraFlowSDN controller. In addition, Docker is installed to build the Docker images for the ETSI TeraFlowSDN controller.
The steps described in this section might take some minutes depending on your internet connection speed and the
resources assigned to your VM, or the specifications of your physical server.
## 1.2.1. Upgrade the Ubuntu distribution
Skip this step if you already did it during the creation of the VM.
```bash
sudo apt-get update -y
sudo apt-get dist-upgrade -y
```
## 1.2.2. Install prerequisites
```bash
sudo apt-get install -y ca-certificates curl gnupg lsb-release snapd jq
```
## 1.2.3. Install Docker CE
Install Docker CE
```bash
sudo apt-get install -y docker.io
```
Add key "insecure-registries" with the private repository to the daemon configuration. It is done in two commands since
sometimes read from and write to same file might cause trouble.
```bash
if [ -s /etc/docker/daemon.json ]; then cat /etc/docker/daemon.json; else echo '{}'; fi \
| jq 'if has("insecure-registries") then . else .+ {"insecure-registries": []} end' -- \
| jq '."insecure-registries" |= (.+ ["localhost:32000"] | unique)' -- \
| tee tmp.daemon.json
sudo mv tmp.daemon.json /etc/docker/daemon.json
sudo chown root:root /etc/docker/daemon.json
sudo chmod 600 /etc/docker/daemon.json
```
Restart the Docker daemon
```bash
sudo systemctl restart docker
```
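To verify that the insecure registry has been registered, you can inspect the daemon configuration and the Docker
runtime information. The exact wording of the `docker info` output may vary between Docker versions.
```bash
sudo cat /etc/docker/daemon.json
sudo docker info | grep -A 2 "Insecure Registries"
# localhost:32000 should appear in the list
```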
## 1.2.4. Install MicroK8s
Ref: https://ubuntu.com/tutorials/install-a-local-kubernetes-with-microk8s
Ref: https://microk8s.io/#install-microk8s
```bash
# Install MicroK8s
sudo snap install microk8s --classic --channel=1.24/stable
# Create alias for command "microk8s.kubectl" to be usable as "kubectl"
sudo snap alias microk8s.kubectl kubectl
# Verify status of ufw firewall
sudo ufw status
# If ufw is active, add the following rules to allow pod-to-pod and pod-to-Internet traffic
sudo ufw allow in on cni0 && sudo ufw allow out on cni0
sudo ufw default allow routed
```
## 1.2.5. Add user to the docker and microk8s groups
```bash
sudo usermod -a -G docker $USER
sudo usermod -a -G microk8s $USER
sudo chown -f -R $USER ~/.kube
sudo reboot
```
## 1.2.6. Check status of Kubernetes
```bash
microk8s.status --wait-ready
```
## 1.2.7. Check all resources in Kubernetes
```bash
microk8s.kubectl get all --all-namespaces
```
## 1.2.8. Enable addons
The addons to be enabled are:
- `dns`: enables resolving the pods and services by name
- `hostpath-storage`: enables providing storage for the pods (required by `registry`)
- `ingress`: deploys an ingress controller to expose the microservices outside Kubernetes
- `registry`: deploys a private registry for the TFS controller images
```bash
microk8s.enable dns hostpath-storage ingress registry
```
__Note__: enabling some of the addons might take a few minutes.
[Check the status](./1-2-install-microk8s.md#126-check-status-of-kubernetes) periodically until all addons are
shown as enabled. Then, [check the resources](./1-2-install-microk8s.md#127-check-all-resources-in-kubernetes)
periodically until all pods are Ready and Running.
## 1.2.9. Stop, Restart, and Redeploy
Find below some additional commands you might need while you work with MicroK8s:
```bash
microk8s.stop # stop MicroK8s cluster (for instance, before powering off your computer)
microk8s.start # start MicroK8s cluster
microk8s.reset # reset infrastructure to a clean state
```
If the commands above do not recover the MicroK8s cluster, you can redeploy it.
First, remove the current deployment as follows:
```bash
sudo snap remove microk8s
sudo apt-get remove --purge docker.io
```
Then, redeploy it as described in this section.
# 1.3. Deploy TeraFlowSDN over MicroK8s
This section describes how to deploy the TeraFlowSDN controller on top of MicroK8s using the environment configured in
the previous sections.
## 1.3.1. Install prerequisites
```bash
sudo apt-get install -y git curl jq
```
## 1.3.2. Clone the Git repository of the TeraFlowSDN controller
__Important__: The TeraFlowSDN code is currently hosted in two repositories: GitLab.com and the ETSI-owned GitLab
(ETSI Labs). At present, only the GitLab.com repository accepts code contributions, which are
periodically mirrored to ETSI Labs. In the near future, we plan to swap the repository roles: new
contributions will be accepted only at ETSI Labs, while GitLab.com will probably be kept as a mirror of
ETSI. If you plan to contribute code to the TeraFlowSDN controller, clone from GitLab.com for now. We
will update this tutorial as soon as the repository roles are swapped.
Clone from GitLab.com (if you want to contribute code to TeraFlowSDN):
```bash
mkdir ~/tfs-ctrl
git clone https://gitlab.com/teraflow-h2020/controller.git ~/tfs-ctrl
```
Clone from the ETSI-owned GitLab (if you do not plan to contribute code):
```bash
mkdir ~/tfs-ctrl
git clone https://labs.etsi.org/rep/tfs/controller.git ~/tfs-ctrl
```
## 1.3.3. Checkout the appropriate Git branch
By default, the 'master' branch is checked out. If you want to deploy the 'develop' branch, which incorporates the most
up-to-date code contributions and features, run the following commands:
```bash
cd ~/tfs-ctrl
git checkout develop
```
## 1.3.4. Prepare a deployment script with the deployment settings
Create a new deployment script, e.g., `my_deploy.sh`, adding the appropriate settings as follows. This script, by
default, makes use of the private Docker registry enabled in MicroK8s, as specified in `TFS_REGISTRY_IMAGE`. It builds
the Docker images for the subset of components defined in `TFS_COMPONENTS`, tags them with the tag defined in
`TFS_IMAGE_TAG`, deploys them in the namespace defined in `TFS_K8S_NAMESPACE`, and (optionally) deploys the extra
Kubernetes manifests listed in `TFS_EXTRA_MANIFESTS`. It also lets you specify in `TFS_GRAFANA_PASSWORD` the
password to be set for the Grafana `admin` user.
```bash
cd ~/tfs-ctrl
tee my_deploy.sh >/dev/null <<EOF
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
export TFS_COMPONENTS="context device automation service compute monitoring webui"
export TFS_IMAGE_TAG="dev"
export TFS_K8S_NAMESPACE="tfs"
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
export TFS_GRAFANA_PASSWORD="admin123+"
EOF
```
## 1.3.5. Deploy TFS controller
First, source the deployment settings defined in the previous section. This way, you do not need to specify the
environment variables in each and every command you execute to operate the TFS controller. Remember to re-source the
file if you open new terminal sessions.
Then, run the following command to deploy TeraFlowSDN controller on top of the MicroK8s Kubernetes platform.
```bash
cd ~/tfs-ctrl
source my_deploy.sh
./deploy.sh
```
The script performs the following steps:
1. Build the Docker images for the components defined in `TFS_COMPONENTS`
2. Tag the Docker images with the value of `TFS_IMAGE_TAG`
3. Push the Docker images to the repository defined in `TFS_REGISTRY_IMAGE`
4. Create the namespace defined in `TFS_K8S_NAMESPACE`
5. Deploy the components defined in `TFS_COMPONENTS`
6. Create the file `tfs_runtime_env_vars.sh` containing the environment variables that define the host addresses and
   port numbers of the components defined in `TFS_COMPONENTS` (an illustrative example is shown after this list).
7. Create an ingress controller listening at port 80 for HTTP connections to enable external access to the TeraFlowSDN
WebUI, Grafana Dashboards, Context Debug endpoints, and Compute NBI interfaces.
8. Initialize and configure the Grafana dashboards
9. Report a summary of the deployment
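For illustration, the generated `tfs_runtime_env_vars.sh` file follows the pattern below; the addresses and ports shown
are made-up examples, and the actual variables depend on the components listed in `TFS_COMPONENTS`.
```bash
# Example content of tfs_runtime_env_vars.sh (illustrative values only)
export PYTHONPATH=/home/tfs/tfs-ctrl/src
export CONTEXTSERVICE_SERVICE_HOST=10.152.183.21
export CONTEXTSERVICE_SERVICE_PORT_GRPC=1010
export CONTEXTSERVICE_SERVICE_PORT_HTTP=8080
export DEVICESERVICE_SERVICE_HOST=10.152.183.95
export DEVICESERVICE_SERVICE_PORT_GRPC=2020
# ... one block per deployed component ...
```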
## 1.3.6. Report the deployment of the TFS controller
The summary report given at the end of the deployment can be generated manually at any time by running the following
commands. You can skip sourcing `my_deploy.sh` if it has already been done.
```bash
cd ~/tfs-ctrl
source my_deploy.sh
./show_deploy.sh
```
# 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards
This section describes how to get access to the TeraFlowSDN controller WebUI and the monitoring Grafana dashboards.
## 1.4.1. Access the TeraFlowSDN WebUI
If you followed the MicroK8s-based installation steps, an ingress controller listening on TCP port 80 was installed.
During the creation of the VM, a forwarding rule from local TCP port 8080 to the VM's TCP port 80 was configured, so the
WebUIs and REST APIs of TeraFlowSDN should be exposed on the endpoint `127.0.0.1:8080` of your local machine.
In addition, the ingress controller defines the following reverse proxy paths:
- `http://127.0.0.1:8080/webui`: points to the WebUI of TeraFlowSDN.
- `http://127.0.0.1:8080/grafana`: points to the Grafana dashboards. This endpoint gives access to the monitoring
dashboards of TeraFlowSDN. The credentials for the `admin` user are those defined in the `my_deploy.sh` script, in the
`TFS_GRAFANA_PASSWORD` variable.
- `http://127.0.0.1:8080/context`: points to the REST API exposed by the TeraFlowSDN Context component. This endpoint
is mainly used for debugging purposes. Note that this endpoint is designed to be accessed from the WebUI.
- `http://127.0.0.1:8080/restconf`: points to the Compute component NBI based on RestCONF. This endpoint enables
connecting external software, such as ETSI OpenSourceMANO NFV Orchestrator, to TeraFlowSDN.
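As a quick sanity check from your local machine, you can probe these endpoints with `curl`. The exact HTTP status codes
may vary (for instance, Grafana typically redirects to its login page), but each endpoint should answer.
```bash
curl -I http://127.0.0.1:8080/webui/      # TeraFlowSDN WebUI
curl -I http://127.0.0.1:8080/grafana/    # Grafana dashboards
curl -I http://127.0.0.1:8080/context/    # Context component debug API
```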
# 2. Run Experiments Guide (WORK IN PROGRESS)
This section walks you through the process of running experiments in TeraFlowSDN on top of an Oracle VirtualBox-based
VM running the MicroK8s Kubernetes platform. The guide includes details on configuring the Python environment, some
basic commands you might need, configuring the network topology, and executing different experiments.
## Table of Contents:
- [2.1. Configure the Python environment](./2-1-python-environment.md)
- [2.2. Execute OFC'22 Experiment (WORK IN PROGRESS)](./2-2-ofc22.md)
- [2.3. Execute OECC/PSC'22 Experiment (WORK IN PROGRESS)](./2-3-oeccpsc22.md)
# 2.1. Configure Python Environment
This section describes how to configure the Python environment to run experiments and develop code for the ETSI
TeraFlowSDN controller.
In particular, we use [PyEnv](https://github.com/pyenv/pyenv) to install the appropriate version of Python and manage
the virtual environments.
## 2.1.1. Upgrade the Ubuntu distribution
Skip this step if you already did it during the creation of the VM.
```bash
sudo apt-get update -y
sudo apt-get dist-upgrade -y
```
## 2.1.2. Install PyEnv dependencies in the VM
```bash
sudo apt-get install -y make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget \
curl llvm git libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev
```
## 2.1.3. Install PyEnv
```bash
curl https://pyenv.run | bash
# When finished, edit ~/.bash_profile // ~/.profile // ~/.bashrc as the installer proposes.
# In general, it means to append the following lines to ~/.bashrc:
export PYENV_ROOT="$HOME/.pyenv"
command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
```
## 2.1.4. Restart the VM
Restart the VM for all the changes to take effect.
```bash
sudo reboot
```
## 2.1.5. Install Python 3.9 over PyEnv
```bash
pyenv install 3.9.13
# This command might take some minutes depending on your Internet connection speed and the performance of your VM.
```
## 2.1.6. Create the Virtual Environment for TeraFlowSDN
The following commands create a virtual environment named `tfs` using Python 3.9.13 and associate that environment
with the current folder, i.e., `~/tfs-ctrl`. That way, when you are in that folder, the associated virtual environment
will be used, thus inheriting the Python interpreter, i.e., Python 3.9.13, and the Python packages installed in it.
```bash
cd ~/tfs-ctrl
pyenv virtualenv 3.9.13 tfs
pyenv local 3.9.13/envs/tfs
```
After completing these commands, you should see in your prompt that you are now within the virtual environment
`3.9.13/envs/tfs` on folder `~/tfs-ctrl`:
```
(3.9.13/envs/tfs) tfs@tfs-vm:~/tfs-ctrl$
```
## 2.1.7. Install the basic Python packages within the virtual environment
From within the `3.9.13/envs/tfs` environment, in the folder `~/tfs-ctrl`, run the following commands to install the
basic Python packages required to work with TeraFlowSDN.
```bash
cd ~/tfs-ctrl
./install_requirements.sh
```
# 2.2. OFC'22 (WORK IN PROGRESS)
Check [Old Version](./../ofc22/README.md)
# 2.3. OECC/PSC'22 (WORK IN PROGRESS)
Check [Old Version](./../oeccpsc22/README.md)
# 3. Development Guide (WORK IN PROGRESS)
This section walks you through the process of developing new components for the TeraFlowSDN controller. For
convenience, this guide assumes you are using the Oracle VirtualBox-based VM running the MicroK8s Kubernetes platform
described in the [Deployment Guide](./1-0-deployment.md). The guide includes details on configuring VSCode, connecting
it to the VM, and useful development commands, tricks, and hints.
## Table of Contents:
- [3.1. Configure VSCode and Connect to the VM](./3-1-configure-vscode.md)
- [3.2. Development Commands, Tricks, and Hints (WORK IN PROGRESS)](./3-2-develop-cth.md)
# 3.1. Configure VSCode and Connect to the VM
## 3.1.1. Install VSCode and the required extensions
If not already done, install [VSCode](https://code.visualstudio.com/) and the "Remote SSH" extension on your local
machine, not in the VM.
__Note__: "Python" extension is not required here. It will be installed later on the VSCode server running on the VM.
## 3.1.2. Configure the "Remote SSH" extension
- Go to left icon "Remote Explorer"
- Click the "gear" icon next to "SSH TARGETS" on top of "Remote Explorer" bar
- Choose to edit "<...>/.ssh/config" file (or equivalent)
- Add the following entry (assuming previous port forwarding configuration):
```
Host TFS-VM
HostName 127.0.0.1
Port 2200
ForwardX11 no
User tfs
```
- Save the file
- An entry "TFS-VM" should appear on "SSH TARGETS".
## 3.1.3. Connect VSCode to the VM through "Remote SSH" extension
- Right-click on "TFS-VM"
- Select "Connect to Host in Current Window"
- Reply to the questions asked
- Platform of the remote host "TFS-VM": Linux
- "TFS-VM" has fingerprint "<fingerprint>". Do you want to continue?: Continue
- Type tfs user's password: tfs123
- You should now be connected to the TFS-VM.
__Note__: if you get a connection error message, the reason might be a wrong SSH server fingerprint. Edit the file
"<...>/.ssh/known_hosts" on your local user account, check if there is a line starting with
"[127.0.0.1]:2200" (assuming the previous port forwarding configuration), remove the entire line, save the file,
and retry the connection.
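Instead of editing the file by hand, the stale entry can usually be removed with `ssh-keygen` on your local machine
(assuming a standard OpenSSH client):
```bash
# Remove the cached host key for the forwarded SSH port
ssh-keygen -R "[127.0.0.1]:2200"
```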
## 3.1.4. Add SSH key to prevent typing the password every time
This step creates an SSH key in the VM and configures it in VSCode, so you do not have to type the password every time.
- In VSCode (connected to the VM), click menu "Terminal > New Terminal"
- Run the following commands on the VM's terminal through VSCode
```bash
ssh-keygen -t rsa -b 4096 -f ~/.ssh/tfs-vm.key
# leave password empty
ssh-copy-id -i ~/.ssh/tfs-vm.key.pub tfs@10.0.2.10
# tfs@10.0.2.10's password: <type tfs user's password: tfs123>
rm .ssh/known_hosts
```
- In VSCode, click left "Explorer" panel to expand, if not expanded, and click "Open Folder" button.
- Choose "/home/tfs/"
- Type tfs user's password when asked
- Trust authors of the "/home/tfs [SSH: TFS-VM]" folder when asked
- Right click on the file "tfs-vm.key" in the file explorer
- Select "Download..." option
- Download the file into the ".ssh" folder of your local user account
- Delete files "tfs-vm.key" and "tfs-vm.key.pub" on the TFS-VM.
- In VSCode, click left "Remote Explorer" panel to expand
- Click the "gear" icon next to "SSH TARGETS" on top of "Remote Explorer" bar
- Choose to edit "<...>/.ssh/config" file (or equivalent)
- Find entry "Host TFS-VM" and update it as follows:
```
Host TFS-VM
HostName 127.0.0.1
Port 2200
ForwardX11 no
User tfs
IdentityFile "<path to the downloaded identity private key file>"
```
- Save the file
- From now on, VSCode will use the identity file to connect to the TFS-VM instead of asking for the user's password.