Commit 22749aad authored by Lluis Gifre Renom

Merge branch 'feat/tutorial' into 'develop'

Tutorials cleanup

See merge request !10
parents 3b45950c 9e65e44d
Showing changed files with 677 additions and 240 deletions
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Name of the new component to scaffold, e.g., "forecaster"
COMPONENT_NAME=$1
PROJECTDIR=$(pwd)
mkdir -p ${PROJECTDIR}/src/${COMPONENT_NAME}
mkdir -p ${PROJECTDIR}/src/${COMPONENT_NAME}/client
mkdir -p ${PROJECTDIR}/src/${COMPONENT_NAME}/service
mkdir -p ${PROJECTDIR}/src/${COMPONENT_NAME}/tests
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/client/__init__.py
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/service/__init__.py
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/tests/__init__.py
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/.gitlab-ci.yml
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/__init__.py
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/Config.py
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/Dockerfile
touch ${PROJECTDIR}/src/${COMPONENT_NAME}/requirements.in
cd ${PROJECTDIR}/src
# Generate the GitLab CI/CD pipeline descriptor for the new component
python gitlab-ci.yml_generator.py -t latest ${COMPONENT_NAME}
cd ${PROJECTDIR}/src/${COMPONENT_NAME}
# Temporarily rename the CI file so the license-header script processes it,
# then restore its original name
mv .gitlab-ci.yml gitlab-ci.yaml
${PROJECTDIR}/scripts/add_license_header_to_files.sh
mv gitlab-ci.yaml .gitlab-ci.yml
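
A hypothetical invocation of the script above, assuming it is saved as
`scripts/create_component.sh` (the actual file name is not shown in this diff) and run
from the root of the controller repository:

```bash
cd ~/tfs-ctrl
./scripts/create_component.sh forecaster
```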
{
"contexts": [
{
"context_id": {"context_uuid": {"uuid": "admin"}},
"topology_ids": [], "service_ids": []
}
],
"topologies": [
{
"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}},
"device_ids": [], "link_ids": []
}
],
"devices": [
{
"device_id": {"device_uuid": {"uuid": "DC1-GW"}}, "device_type": "emu-datacenter", "device_drivers": [0],
"device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}"}}
]}
},
{
"device_id": {"device_uuid": {"uuid": "DC2-GW"}}, "device_type": "emu-datacenter", "device_drivers": [0],
"device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}"}}
]}
},
{
"device_id": {"device_uuid": {"uuid": "CS1-GW1"}}, "device_type": "packet-router", "device_drivers": [1],
"device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"}}
]}
},
{
"device_id": {"device_uuid": {"uuid": "CS1-GW2"}}, "device_type": "packet-router", "device_drivers": [1],
"device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"}}
]}
},
{
"device_id": {"device_uuid": {"uuid": "CS2-GW1"}}, "device_type": "packet-router", "device_drivers": [1],
"device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"}}
]}
},
{
"device_id": {"device_uuid": {"uuid": "CS2-GW2"}}, "device_type": "packet-router", "device_drivers": [1],
"device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}"}}
]}
},
{
"device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "emu-open-line-system", "device_drivers": [0],
"device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"aade6001-f00b-5e2f-a357-6a0a9d3de870\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"eb287d83-f05e-53ec-ab5a-adf6bd2b5418\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"0ef74f99-1acc-57bd-ab9d-4b958b06c513\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"50296d99-58cc-5ce7-82f5-fc8ee4eec2ec\"}]}"}}
]}
}
],
"links": [
{
"link_id": {"link_uuid": {"uuid": "DC1-GW/eth1==CS1-GW1/10/1"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "DC1-GW"}}, "endpoint_uuid": {"uuid": "eth1"}},
{"device_id": {"device_uuid": {"uuid": "CS1-GW1"}}, "endpoint_uuid": {"uuid": "10/1"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "DC1-GW/eth2==CS1-GW2/10/1"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "DC1-GW"}}, "endpoint_uuid": {"uuid": "eth2"}},
{"device_id": {"device_uuid": {"uuid": "CS1-GW2"}}, "endpoint_uuid": {"uuid": "10/1"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "DC2-GW/eth1==CS2-GW1/10/1"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "DC2-GW"}}, "endpoint_uuid": {"uuid": "eth1"}},
{"device_id": {"device_uuid": {"uuid": "CS2-GW1"}}, "endpoint_uuid": {"uuid": "10/1"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "DC2-GW/eth2==CS2-GW2/10/1"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "DC2-GW"}}, "endpoint_uuid": {"uuid": "eth2"}},
{"device_id": {"device_uuid": {"uuid": "CS2-GW2"}}, "endpoint_uuid": {"uuid": "10/1"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "CS1-GW1/1/1==OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "CS1-GW1"}}, "endpoint_uuid": {"uuid": "1/1"}},
{"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "CS1-GW2/1/1==OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "CS1-GW2"}}, "endpoint_uuid": {"uuid": "1/1"}},
{"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "CS2-GW1/1/1==OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "CS2-GW1"}}, "endpoint_uuid": {"uuid": "1/1"}},
{"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "CS2-GW2/1/1==OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}, "link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "CS2-GW2"}}, "endpoint_uuid": {"uuid": "1/1"}},
{"device_id": {"device_uuid": {"uuid": "OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}
]
}
]
}
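
Before uploading a descriptor like the one above through the WebUI, it can help to
sanity-check it with `jq` (installed as a prerequisite in Section 1.3.1); the file name
below is an assumption:

```bash
# Count the entities declared in the descriptor file (file name is an assumption).
jq '{contexts: (.contexts|length), topologies: (.topologies|length),
     devices: (.devices|length), links: (.links|length)}' descriptors_emulated.json
```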
# 1. Deployment Guide

This section walks you through the process of deploying TeraFlowSDN on top of a Virtual
Machine (VM) running the [MicroK8s Kubernetes platform](https://microk8s.io).
The guide includes the details on configuring and installing the VM, installing and
configuring MicroK8s, and deploying and reporting the status of the TeraFlowSDN
controller.
## Table of Content:

- [1.1. Create VM for the TeraFlowSDN controller](./1-1-create-vm.md)
...
## 1.1.1. Oracle VirtualBox

### 1.1.1.1. Create a NAT Network in VirtualBox

In "Oracle VM VirtualBox Manager", Menu "File > Preferences... > Network", create a NAT
network with the following specifications:
|Name       |CIDR       |DHCP    |IPv6    |
|-----------|-----------|--------|--------|
|TFS-NAT-Net|10.0.2.0/24|Disabled|Disabled|
Within the newly created "TFS-NAT-Net" NAT network, configure the following IPv4
forwarding rules:

|Name|Protocol|Host IP  |Host Port|Guest IP |Guest Port|
|----|--------|---------|---------|---------|----------|
...@@ -36,8 +37,9 @@ __Note__: IP address 10.0.2.10 is the one that will be assigned to the VM.

__Note__: (*) settings to be edited after the VM is created.
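
If you prefer the command line, the same NAT network and forwarding rules can be created
with `VBoxManage`; a hedged sketch (the rule names and the SSH rule are assumptions, the
HTTP rule matches the 8080 -> 80 forward used later in this guide):

```bash
# Create the NAT network from the CLI (equivalent to the GUI steps above).
VBoxManage natnetwork add --netname TFS-NAT-Net --network "10.0.2.0/24" --enable --dhcp off
# Example forwarding rules towards the VM at 10.0.2.10.
VBoxManage natnetwork modify --netname TFS-NAT-Net --port-forward-4 "SSH:tcp:[]:2222:[10.0.2.10]:22"
VBoxManage natnetwork modify --netname TFS-NAT-Net --port-forward-4 "HTTP:tcp:[]:8080:[10.0.2.10]:80"
```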
### 1.1.1.3. Install Ubuntu 20.04 LTS Operating System

In "Oracle VM VirtualBox Manager", start the VM in normal mode, and follow the
installation procedure.
Below we provide some installation guidelines:
- Installation Language: English
- Autodetect your keyboard
- Configure static network specifications:
...@@ -73,9 +75,10 @@
```bash
sudo apt-get dist-upgrade -y
```
## 1.1.1.5. Install VirtualBox Guest Additions

On VirtualBox Manager, open the VM main screen. If you are running the VM in headless
mode, right click over the VM in the VirtualBox Manager window and click "Show".
If a dialog informing about how to leave the interface of the VM is shown, confirm by
pressing the "Switch" button. The interface of the VM should appear.
Click menu "Device > Insert Guest Additions CD image..."
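
After inserting the virtual CD, the Guest Additions are typically built and installed
from a terminal inside the guest; a sketch assuming the default device and mount point:

```bash
# Mount the Guest Additions CD and run the installer (default paths assumed).
sudo mount /dev/cdrom /mnt
sudo /mnt/VBoxLinuxAdditions.run
sudo reboot
```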
...
# 1.1. Create VM for the TeraFlowSDN controller

In this section, we install a VM to be used as the deployment, execution, and
development environment for the ETSI TeraFlowSDN controller.
If you already have a remote physical server fitting the requirements specified in this
section, feel free to use it instead of deploying a local VM.
Other virtualization environments can also be used; in that case, you will need to adapt
these instructions to your particular case.
Different hypervisors are considered for this purpose.
Check the table of contents for available options.
If you want to contribute with other hypervisors, [contact](./README.md#contact) the TFS
team through Slack.
## Table of Content:

- [1.1.1. Oracle VirtualBox](./1-1-1-create-vm-oracle-virtualbox.md)
...
# 1.2. Install MicroK8s Kubernetes platform

This section describes how to deploy the MicroK8s Kubernetes platform and configure it
to be used with the ETSI TeraFlowSDN controller.
Besides, Docker is installed to build Docker images for the ETSI TeraFlowSDN controller.

The steps described in this section might take some minutes depending on your Internet
connection speed and the resources assigned to your VM, or the specifications of your
physical server.
## 1.2.1. Upgrade the Ubuntu distribution

...@@ -56,6 +58,14 @@
```bash
sudo snap install microk8s --classic --channel=1.24/stable

# Create alias for command "microk8s.kubectl" to be usable as "kubectl"
sudo snap alias microk8s.kubectl kubectl
```
It is important to make sure that `ufw` will not interfere with the internal pod-to-pod
and pod-to-Internet traffic.
To do so, first check the status.
If `ufw` is active, use the following command to enable the communication.
```bash
# Verify status of ufw firewall
sudo ufw status
```

...@@ -67,6 +77,12 @@ sudo ufw default allow routed
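
If `ufw` is active, the MicroK8s documentation additionally suggests allowing traffic on
the cluster bridge interface; a hedged sketch (the interface name `cni0` is the MicroK8s
default, verify it with `ip link` on your system):

```bash
# Allow pod traffic on the MicroK8s bridge interface and keep routing open.
sudo ufw allow in on cni0 && sudo ufw allow out on cni0
sudo ufw default allow routed
```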
## 1.2.5. Add user to the docker and microk8s groups

It is important that your user has the permission to run `docker` and `microk8s` in the
terminal.
To allow this, you need to add your user to the `docker` and `microk8s` groups with the
following commands:
```bash
sudo usermod -a -G docker $USER
sudo usermod -a -G microk8s $USER
...@@ -74,7 +90,8 @@ sudo chown -f -R $USER $HOME/.kube
sudo reboot
```
In case the `.kube` configuration is not automatically provisioned into your home
folder, you may follow the steps below:

```bash
mkdir -p $HOME/.kube
...@@ -83,17 +100,29 @@ microk8s config > $HOME/.kube/config
sudo reboot
```
## 1.2.6. Check status of Kubernetes and addons

To retrieve the status of Kubernetes __once__, run the following command:

```bash
microk8s.status --wait-ready
```
To retrieve the status of Kubernetes __periodically__ (e.g., every 1 second), run the
following command:
```bash
watch -n 1 microk8s.status --wait-ready
```
## 1.2.7. Check all resources in Kubernetes

To retrieve the status of the Kubernetes resources __once__, run the following command:

```bash
kubectl get all --all-namespaces
```
To retrieve the status of the Kubernetes resources __periodically__ (e.g., every 1
second), run the following command:
```bash
watch -n 1 kubectl get all --all-namespaces
```
## 1.2.8. Enable addons

The addons to be enabled are:

...@@ -106,10 +135,16 @@
```bash
microk8s.enable dns hostpath-storage ingress registry
```
__Important__: Enabling some of the addons might take a few minutes.
Do not proceed with the next steps until the addons are ready; otherwise, the deployment
might fail.
To confirm everything is up and running:
1. Periodically
   [Check the status of Kubernetes](./1-2-install-microk8s.md#126-check-status-of-kubernetes)
   until you see the addons [dns, ha-cluster, hostpath-storage, ingress, registry,
   storage] in the enabled block.
2. Periodically
   [Check Kubernetes resources](./1-2-install-microk8s.md#127-check-all-resources-in-kubernetes)
   until all pods are __Ready__ and __Running__.

## 1.2.9. Stop, Restart, and Redeploy

...
# 1.3. Deploy TeraFlowSDN over MicroK8s

This section describes how to deploy the TeraFlowSDN controller on top of MicroK8s using
the environment configured in the previous sections.

## 1.3.1. Install prerequisites

...@@ -11,52 +11,42 @@ sudo apt-get install -y git curl jq
## 1.3.2. Clone the Git repository of the TeraFlowSDN controller

Clone from the ETSI-hosted GitLab code repository:

```bash
mkdir ~/tfs-ctrl
git clone https://labs.etsi.org/rep/tfs/controller.git ~/tfs-ctrl
```

__Important__: The original H2020-TeraFlow project hosted on GitLab.com has been
archived and will not receive further contributions/updates.
Please clone from the [ETSI-hosted GitLab code repository](https://labs.etsi.org/rep/tfs/controller).
## 1.3.3. Checkout the appropriate Git branch

By default, the *master* branch is checked out.
If you want to deploy the *develop* branch, which incorporates the most up-to-date code
contributions and features, run the following command:

```bash
cd ~/tfs-ctrl
git checkout develop
```
## 1.3.4. Prepare a deployment script with the deployment settings

Create a new deployment script, e.g., `my_deploy.sh`, adding the appropriate settings as
follows.
This script, by default, makes use of the private Docker registry enabled in MicroK8s,
as specified in `TFS_REGISTRY_IMAGE`.
It builds the Docker images for the subset of components defined in `TFS_COMPONENTS`,
tags them with the tag defined in `TFS_IMAGE_TAG`, deploys them in the namespace defined
in `TFS_K8S_NAMESPACE`, and (optionally) deploys the extra Kubernetes manifests listed
in `TFS_EXTRA_MANIFESTS`.
Besides, it lets you specify in `TFS_GRAFANA_PASSWORD` the password to be set for the
Grafana `admin` user.

```bash
cd ~/tfs-ctrl
tee my_deploy.sh >/dev/null << EOF
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
export TFS_COMPONENTS="context device automation pathcomp service slice compute monitoring webui"
export TFS_IMAGE_TAG="dev"
...@@ -68,10 +58,12 @@ EOF
```
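
The collapsed part of the heredoc sets the remaining variables described above; a hedged
sketch with plausible values (the namespace, manifests path, and password shown are
assumptions, not necessarily the repository defaults):

```bash
# Assumed values; adjust to your setup before deploying.
export TFS_K8S_NAMESPACE="tfs"                                  # target Kubernetes namespace
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"  # extra manifests (assumed path)
export TFS_GRAFANA_PASSWORD="admin123+"                         # Grafana admin password (see 1.4.1)
```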
## 1.3.5. Deploy TFS controller

First, source the deployment settings defined in the previous section.
This way, you do not need to specify the environment variables in each and every command
you execute to operate the TFS controller.
Remember to re-source the file if you open new terminal sessions.
Then, run the following command to deploy the TeraFlowSDN controller on top of the
MicroK8s Kubernetes platform.

```bash
cd ~/tfs-ctrl
...@@ -79,16 +71,14 @@ source my_deploy.sh
./deploy.sh
```
The script performs the following steps:

1. Builds the Docker images for the components defined in `TFS_COMPONENTS`
2. Tags the Docker images with the value of `TFS_IMAGE_TAG`
3. Pushes the Docker images to the repository defined in `TFS_REGISTRY_IMAGE`
4. Creates the namespace defined in `TFS_K8S_NAMESPACE`
5. Deploys the components defined in `TFS_COMPONENTS`
6. Creates the file `tfs_runtime_env_vars.sh` with the environment variables for the
   components defined in `TFS_COMPONENTS`, defining their local host addresses and their
   port numbers
7. Creates an ingress controller listening at port 80 for HTTP connections to enable
   external access to the TeraFlowSDN WebUI, Grafana Dashboards, Context Debug endpoints,
   and Compute NBI interfaces
8. Initializes and configures the Grafana dashboards
9. Reports a summary of the deployment (see
   [1.5. Show Deployment and Log per Component](./1-5-deploy-logs-troubleshooting.md))
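
Once the script completes, a quick way to confirm the deployment converged is to watch
the pods in the target namespace (the namespace value below is an assumption; it must
match your `TFS_K8S_NAMESPACE` setting):

```bash
# Watch the TFS pods until all of them are Ready and Running.
watch -n 1 kubectl get pods --namespace tfs
```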
# 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards

This section describes how to get access to the TeraFlowSDN controller WebUI and the
monitoring Grafana dashboards.

## 1.4.1. Access the TeraFlowSDN WebUI

If you followed the installation steps based on MicroK8s, you got an ingress controller
installed that exposes TCP port 80.
In the creation of the VM, a forward from local TCP port 8080 to the VM's TCP port 80 is
configured, so the WebUIs and REST APIs of TeraFlowSDN should be exposed on the endpoint
`127.0.0.1:8080` of your local machine.
Besides, the ingress controller defines the following reverse proxy paths (on your local
machine):
- `http://127.0.0.1:8080/webui`: points to the WebUI of TeraFlowSDN.
- `http://127.0.0.1:8080/grafana`: points to the Grafana dashboards.
  This endpoint brings access to the monitoring dashboards of TeraFlowSDN.
  The credentials for the `admin` user are those defined in the `my_deploy.sh` script,
  in the `TFS_GRAFANA_PASSWORD` variable.
- `http://127.0.0.1:8080/context`: points to the REST API exposed by the TeraFlowSDN
  Context component.
  This endpoint is mainly used for debugging purposes.
  Note that this endpoint is designed to be accessed from the WebUI.
- `http://127.0.0.1:8080/restconf`: points to the Compute component NBI based on
  RESTCONF.
  This endpoint enables connecting external software, such as the ETSI OpenSourceMANO
  NFV Orchestrator, to TeraFlowSDN.
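
As a quick sanity check that the ingress answers on the forwarded port, you can query
one of the paths above with `curl` (installed as a prerequisite in Section 1.3.1); the
exact status code may vary:

```bash
# Expect an HTTP response status line, e.g., "HTTP/1.1 200 OK".
curl -sI http://127.0.0.1:8080/webui | head -n 1
```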
# 1.5. Show Deployment and Log per Component

This section presents some helper scripts to inspect the status of the deployment and
the logs of the components.
These scripts are particularly helpful for troubleshooting during execution of
experiments, development, and debugging.
## 1.5.1. Report the deployment of the TFS controller

The summary report given at the end of the
[Deploy TFS controller](./1-3-deploy-tfs.md#135-deploy-tfs-controller) procedure can be
generated manually at any time by running the following command.
You can avoid sourcing `my_deploy.sh` if it has already been done.

```bash
cd ~/tfs-ctrl
source my_deploy.sh
./show_deploy.sh
```
Use this script to validate that all the pods, deployments, replica sets, ingress
controller, etc. are ready and have the appropriate state, e.g., *running* for Pods, and
that the services are deployed and have appropriate IP addresses and port numbers.
## 1.5.2. Report the log of a specific TFS controller component

A number of scripts are pre-created in the `scripts` folder to facilitate the inspection
of the component logs.
For instance, to dump the log of the Context component, run the following command.
You can avoid sourcing `my_deploy.sh` if it has already been done.

```bash
source my_deploy.sh
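# The remainder of this block is collapsed in the diff; presumably it invokes the
# corresponding helper script, following the scripts/show_logs_[component].sh
# pattern described above:
scripts/show_logs_context.sh
```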
...
# 2. Run Experiments Guide

This section walks you through the process of running experiments in TeraFlowSDN on top
of an Oracle VirtualBox-based VM running the MicroK8s Kubernetes platform.
The guide includes the details on configuring the Python environment, some basic
commands you might need, configuring the network topology, and executing different
experiments.

Note that the steps followed here are likely to work regardless of the platform (VM)
where TeraFlowSDN is deployed.
Note also that this guide will keep growing with the new experiments and demonstrations
that are being carried out involving the ETSI TeraFlowSDN controller.
## Table of Content:

- [2.1. Configure the Python environment](./2-1-python-environment.md)
- [2.2. OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services](./2-2-ofc22.md)
- [2.3. OECC/PSC'22 Demo (PENDING)](./2-3-oeccpsc22.md)
- [2.4. ECOC'22 Demo - Disjoint DC-2-DC L2VPN Service](./2-4-ecoc22.md)
- [2.5. NFV-SDN'22 Demo (PENDING)](./2-5-nfvsdn22.md)
# 2.1. Configure Python Environment

This section describes how to configure the Python environment to run experiments and
develop code for the ETSI TeraFlowSDN controller.
In particular, we use [PyEnv](https://github.com/pyenv/pyenv) to install the appropriate
version of Python and manage the virtual environments.

## 2.1.1. Upgrade the Ubuntu distribution

...@@ -22,6 +22,12 @@ sudo apt-get install -y make build-essential libssl-dev zlib1g-dev libbz2-dev li
## 2.1.3. Install PyEnv

We recommend installing PyEnv through the
[PyEnv Installer](https://github.com/pyenv/pyenv-installer).
Below you can find the instructions, but we refer you to that link for up-to-date
instructions.

```bash
curl https://pyenv.run | bash
# When finished, edit ~/.bash_profile // ~/.profile // ~/.bashrc as the installer proposes.
...@@ -32,7 +38,8 @@ eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
```
In case `.bashrc` is not linked properly to your profile, you may need to append the
following line to your local `.profile` file:

```bash
# Open ~/.profile and append this line:
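# The line to append is collapsed in this diff; an assumed value, matching the
# standard PyEnv installer instructions:
export PATH="$HOME/.pyenv/bin:$PATH"
```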
...@@ -48,40 +55,76 @@ sudo reboot
## 2.1.5. Install Python 3.9 over PyEnv

ETSI TeraFlowSDN uses Python 3.9 by default.
You should install the latest update of Python 3.9.
To find the latest version available in PyEnv, you can run the following command:
```bash
pyenv install --list | grep " 3.9"
```
At the time of writing, this command will output the following list:
```
3.9.0
3.9-dev
3.9.1
3.9.2
3.9.4
3.9.5
3.9.6
3.9.7
3.9.8
3.9.9
3.9.10
3.9.11
3.9.12
3.9.13
3.9.14 ** always select the latest version **
```
Therefore, the latest version is Python 3.9.14.
To install this version, you should run:
```bash
pyenv install 3.9.14
# This command might take some minutes depending on your Internet connection speed
# and the performance of your VM.
```
## 2.1.6. Create the Virtual Environment for TeraFlowSDN

The following commands create a virtual environment named `tfs` using Python 3.9 and
associate that environment with the current folder, i.e., `~/tfs-ctrl`.
That way, when you are in that folder, the associated virtual environment will be used,
thus inheriting the Python interpreter, i.e., Python 3.9, and the Python packages
installed on it.

```bash
cd ~/tfs-ctrl
pyenv virtualenv 3.9.14 tfs
pyenv local 3.9.14/envs/tfs
```
If the correct pyenv environment does not get automatically activated when you change to
the `tfs-ctrl/` folder, execute the following command:

```bash
cd ~/tfs-ctrl
pyenv activate 3.9.14/envs/tfs
```

After completing these commands, you should see in your prompt that you are now within
the virtual environment `3.9.14/envs/tfs` on folder `~/tfs-ctrl`:

```
(3.9.14/envs/tfs) tfs@tfs-vm:~/tfs-ctrl$
```
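
To double-check which interpreter and environment are active, the following commands can
be used (the outputs shown in the comments are illustrative):

```bash
python --version   # expected: Python 3.9.14
pyenv version      # expected: 3.9.14/envs/tfs (set by ~/tfs-ctrl/.python-version)
```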
## 2.1.7. Install the basic Python packages within the virtual environment

From within the `3.9.14/envs/tfs` environment on folder `~/tfs-ctrl`, run the following
commands to install the basic Python packages required to work with TeraFlowSDN.

```bash
cd ~/tfs-ctrl
./install_requirements.sh
```
...
# 2.2. OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services

This functional test reproduces the live demonstration *Demonstration of Zero-touch
Device and L3-VPN Service Management Using the TeraFlow Cloud-native SDN Controller*
carried out at [OFC'22](https://ieeexplore.ieee.org/document/9748575)
([open access](https://research.chalmers.se/en/publication/c397ef36-837f-416d-a44d-6d3b561d582a)).
## 2.2.1. Functional test folder

This functional test can be found in folder `./src/tests/ofc22/`.
A convenience alias `./ofc22/` pointing to that folder has been defined.
## 2.2.2. Execute with real devices

This functional test is designed to operate both with real and emulated devices.
By default, emulated devices are used;
however, if you have access to real devices, you can create/modify the files
`./ofc22/tests/Objects.py` and `./ofc22/tests/Credentials.py` to point to your devices,
and map to your own network topology.
Otherwise, you can modify the `./ofc22/tests/descriptors_emulated.json` that is designed
to be uploaded through the WebUI instead of using the command line scripts.
Note that the default scenario assumes devices R2 and R4 are always emulated, while
devices R1, R3, and O1 can be configured as emulated or real devices.
__Important__: The device drivers operating with real devices, e.g., OpenConfigDriver,
P4Driver, and TransportApiDriver, have to be considered as experimental.
The configuration and monitoring capabilities they support are limited or partially
implemented/tested. Use them with care.
## 2.2.3. Deployment and Dependencies

To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes
environment and a TeraFlowSDN controller instance as described in the
[Tutorial: Deployment Guide](./1-0-deployment.md), and that you configured the Python
environment as described in
[Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).
Remember to source the scenario settings, e.g., `cd ~/tfs-ctrl && source ofc22/deploy_specs.sh`,
in each terminal you open.
Then, re-build the protocol buffers code from the proto files:
`./proto/generate_code_python.sh`
## 2.2.4. Access to the WebUI and Dashboard

When the deployment completes, you can connect to the TeraFlowSDN WebUI and Dashboards
as described in
[Tutorial: Deployment Guide > 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md)

Notes:
- the default credentials for the Grafana Dashboard are user/pass: `admin`/`admin123+`.
- in Grafana, you will find the *L3-Monitoring* dashboard in the *Starred dashboards*
  section.
## 2.2.5. Test execution

Before executing the tests, the environment variables need to be prepared.
First, make sure to load your deployment variables by:

```
source my_deploy.sh
```

Then, you also need to load the environment variables to support the execution of the
tests by:

```
source tfs_runtime_env_vars.sh
```

You also need to make sure that you have all the gRPC-generated code in your folder.
To do so, run:

```
proto/generate_code_python.sh
```
...@@ -73,9 +82,10 @@ To execute this functional test, four main steps need to be carried out:
3. L3VPN Service removal
4. Cleanup
As the execution of each test progresses, a report will be generated indicating
*PASSED* / *FAILED* / *SKIPPED*.
If there is some error during the execution, you should see a detailed report on the
error.
See the troubleshooting section if needed.

You can check the logs of the different components using the appropriate
`scripts/show_logs_[component].sh` scripts after you execute each step.

...@@ -83,57 +93,70 @@
### 2.2.5.1. Device bootstrapping

This step configures some basic entities (Context and Topology), the devices, and the
links in the topology.
The expected results are:
- The devices to be added into the Topology.
- The devices to be pre-configured and initialized as ENABLED by the Automation
  component.
- The monitoring for the device ports (named as endpoints in TeraFlowSDN) to be
  activated and data collection to automatically start.
- The links to be added to the topology.
To run this step, you can do it from the WebUI by uploading the file
`./ofc22/tests/descriptors_emulated.json` that contains the descriptors of the contexts,
topologies, devices, and links, or by executing the `./ofc22/run_test_01_bootstrap.sh`
script.

When the bootstrapping finishes, check in the Grafana L3-Monitoring Dashboard and you
should see the monitoring data being plotted and updated every 5 seconds (by default).
Given that there is no service configured, you should see a 0-valued flat plot.
In the WebUI, select the *admin* Context.
Then, in the *Devices* tab you should see that 5 different emulated devices have been
created and activated: 4 packet routers, and 1 optical line system controller.
Besides, in the *Services* tab you should see that there is no service created.
Note here that the emulated devices produce synthetic randomly-generated monitoring data
and do not represent any particular services configured.
### 2.2.5.2. L3VPN Service creation

This step configures a new service emulating the request an OSM WIM would make by means
of a Mock OSM instance.

To run this step, execute the `./ofc22/run_test_02_create_service.sh` script.
When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for When the script finishes, check the WebUI *Services* tab. You should see that two
the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration services have been created, one for the optical layer and another for the packet layer.
rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured, Besides, you can check the *Devices* tab to see the configuration rules that have been
you should see the plots with the monitored data for the device. By default, device R1-EMU is selected. configured in each device.
In the Grafana Dashboard, given that there is now a service configured, you should see
the plots with the monitored data for the device.
By default, device R1-EMU is selected.
### 2.2.5.3. L3VPN Service removal

This step deconfigures the previously created services emulating the request an OSM WIM
would make by means of a Mock OSM instance.

To run this step, execute the `./ofc22/run_test_03_delete_service.sh` script, or delete
the L3NM service from the WebUI.
When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed. When the script finishes, check the WebUI *Services* tab.
Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the You should see that the two services have been removed.
Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again. Besides, in the *Devices* tab you can see that the appropriate configuration rules have
been deconfigured.
In the Grafana Dashboard, given that there is no service configured, you should see a
0-valued flat plot again.
### 2.2.5.4. Cleanup

This last step performs a cleanup of the scenario, removing all the TeraFlowSDN entities
for completeness.

To run this step, execute the `./ofc22/run_test_04_cleanup.sh` script.

When the script finishes, check the WebUI *Devices* tab; you should see that the devices
have been removed.
Besides, in the *Services* tab you can see that the *admin* Context has no services,
given that the context has been removed.
# 2.4. ECOC'22 Demo - Disjoint DC-2-DC L2VPN Service

This functional test reproduces the experimental assessment of *Experimental
Demonstration of Transport Network Slicing with SLA Using the TeraFlowSDN Controller*
presented at [ECOC'22](https://www.optica.org/en-us/events/topical_meetings/ecoc/schedule/?day=Tuesday#Tuesday).
## 2.4.1. Functional test folder

This functional test can be found in folder `./src/tests/ecoc22/`.
A convenience alias `./ecoc22/` pointing to that folder has been defined.
## 2.4.2. Execute with real devices

This functional test has only been tested with emulated devices;
however, if you have access to real devices, you can modify the files
`./ecoc22/tests/Objects.py` and `./ecoc22/tests/Credentials.py` to point to your
devices, and map to your network topology.
Otherwise, you can modify the `./ecoc22/tests/descriptors_emulated.json` that is
designed to be uploaded through the WebUI instead of using the command line scripts.

__Important__: The device drivers operating with real devices, e.g., OpenConfigDriver,
P4Driver, and TransportApiDriver, have to be considered as experimental.
The configuration and monitoring capabilities they support are limited or partially
implemented/tested. Use them with care.
## 2.4.3. Deployment and Dependencies ## 2.4.3. Deployment and Dependencies
To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes environment and a TeraFlowSDN
controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and that you configured the
Python environment as described in
[Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).

Remember to source the scenario settings, e.g., `cd ~/tfs-ctrl && source ecoc22/deploy_specs.sh`, in each terminal you
open. Next, remember to source the environment variables created by the deployment, e.g.,
`cd ~/tfs-ctrl && source tfs_runtime_env_vars.sh`.

Then, re-build the protocol buffers code from the proto files: `./proto/generate_code_python.sh`
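For convenience, the preparation commands above can be chained in each new terminal as follows (the protocol buffer
re-build is typically only needed once, or after the proto files change):

```bash
cd ~/tfs-ctrl
source ecoc22/deploy_specs.sh      # scenario settings for this functional test
source tfs_runtime_env_vars.sh     # environment variables created by the deployment
./proto/generate_code_python.sh    # re-build the protocol buffer code
```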
## 2.4.4. Access to the WebUI
When the deployment completes, you can connect to the TeraFlowSDN WebUI as described in
[Tutorial: Deployment Guide > 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md).

Notes:
- this experiment does not make use of Monitoring, so Grafana is not used.
- the default credentials for the Grafana Dashboard are user/pass: `admin`/`admin123+`.
- this functional test does not involve the Monitoring component, so no monitoring data is plotted in Grafana.
## 2.4.5. Test execution
To execute this functional test, four main steps need to be carried out:

1. Device bootstrapping
2. L2VPN Slice and Services creation
3. L2VPN Slice and Services removal
4. Cleanup
As the execution of each test progresses, a report will be generated indicating *PASSED* / *FAILED* / *SKIPPED*. If
there is some error during the execution, you should see a detailed report on the error. See the troubleshooting
section if needed.
You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts
after you execute each step.
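For instance, assuming a component named `automation` (so the script naming pattern resolves to
`show_logs_automation.sh`), you could inspect its logs with:

```bash
cd ~/tfs-ctrl
scripts/show_logs_automation.sh
```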
### 2.4.5.1. Device bootstrapping
This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The
expected results are:

- The devices to be added into the Topology.
- The devices to be pre-configured and initialized as *ENABLED* by the Automation component.
- The links to be added to the topology.
To run this step, you can either upload through the WebUI the file `./ecoc22/tests/descriptors_emulated.json` that
contains the descriptors of the contexts, topologies, devices, and links, or execute the
`./ecoc22/run_test_01_bootstrap.sh` script.

In the WebUI, select the *admin* Context. Then, in the *Devices* tab you should see that 5 different emulated devices
have been created and activated: 4 packet routers, and 1 optical Open Line System (OLS) controller. Besides, in the
*Services* tab you should see that there is no service created.
### 2.4.5.2. L2VPN Slice and Services creation

This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance.
To run this step, execute the `./ecoc22/run_test_02_create_service.sh` script.
When the script finishes, check the WebUI *Slices* and *Services* tabs. You should see that, for the connectivity
service requested by MockOSM, one slice has been created, and three services have been created: two for the optical
layer and one for the packet layer. Note that the two services for the optical layer correspond to the primary
(service_uuid ending with ":0") and the backup (service_uuid ending with ":1") services. Each of the services indicates
the connections and sub-services that support it.

Besides, you can check the *Devices* tab to see the configuration rules that have been configured in each device.
### 2.4.5.3. L2VPN Slice and Services removal
This step deconfigures the previously created slices and services emulating the request an OSM WIM would make by means
of a Mock OSM instance.

To run this step, execute the `./ecoc22/run_test_03_delete_service.sh` script, or delete the slice from the WebUI.
When the script finishes, check the WebUI *Slices* and *Services* tabs. You should see that the slice and the services
have been removed. Besides, in the *Devices* tab you can see that the appropriate configuration rules have been
deconfigured.
### 2.4.5.4. Cleanup
This last step performs a cleanup of the scenario, removing all the TeraFlowSDN entities for completeness.
To run this step, execute the `./ecoc22/run_test_04_cleanup.sh` script.
When the script finishes, check the WebUI *Devices* tab; you should see that the devices have been removed. Besides, in
the *Slices* and *Services* tabs you can see that the *admin* Context has no slices or services, given that the context
itself has been removed.
# 3. Development Guide (WORK IN PROGRESS)
This section walks you through the process of developing new components for the TeraFlowSDN controller. In particular,
the guide includes the details on how to configure the VSCode IDE, develop a new component, and debug individual
components.

For convenience, this guide assumes you are using the Oracle VirtualBox-based VM running the MicroK8s Kubernetes
platform as described in the [Deployment Guide](./1-0-deployment.md). Besides, it assumes you installed the appropriate
Python and PyEnv as described in [2.1. Configure Python Environment](./2-1-python-environment.md).
## Table of Contents:
- [3.1. Configure VSCode and Connect to the VM](./3-1-configure-vscode.md)
- [3.2. Developing a new component: Forecaster (WORK IN PROGRESS)](./3-2-develop-new-component.md)
- [3.3. Debugging individual components in VSCode](./3-3-debug-comp.md)
- [3.4. Development Commands, Tricks, and Hints (WORK IN PROGRESS)](./3-4-develop-cth.md)
## 3.1.6. Define environment variables for VSCode

The source code in the TFS controller project is hosted in folder `src/`. To help VSCode find the Python modules and
packages, add the following file into your workspace root folder:

```bash
echo "PYTHONPATH=./src" >> ~/tfs-ctrl/.env
```
# 3.2. Developing a new component: Forecaster (WORK IN PROGRESS)
## 3.2.1. Preliminary requisites

As in any microservice-based architecture, the components of TeraFlowSDN can be implemented using different programming
languages. For the sake of simplicity, and given that it is the most widely used programming language in TeraFlow, this
tutorial page assumes the reader will use Python.
This tutorial assumes you have successfully completed the steps in
[2.1. Configure the Python Environment](./2-1-python-environment.md) and
[3.1. Configure VSCode and Connect to the VM](./3-1-configure-vscode.md) to prepare your environment.
## 3.2.2. Create the component folder structure

The source code of each component of TeraFlowSDN is hosted in a particular folder within the `src` folder.
Within that folder, typically, the following subfolders and files are created:

- Folder `client`: contains a client implementation that the rest of components can use to interact with the component.
  See details in [3.2.4. Create the component client](./3-2-develop-new-component.md#324-create-the-component-client).
- Folder `service`: contains the implementation of the service logic.
  See details in [3.2.5. Create the component service](./3-2-develop-new-component.md#325-create-the-component-service).
- Folder `tests`: contains the set of unitary tests to be executed over the component to ensure it is properly
  implemented. See details in [3.2.6. Create the component tests](./3-2-develop-new-component.md#326-create-the-component-tests).
- File `__init__.py`: defines the component as a sub-package of TeraFlowSDN to facilitate imports.
- File `.gitlab-ci.yml`: defines the GitLab CI/CD settings to build, test, and deploy the component in an automated manner.
- File `Config.py`: contains particular configuration settings and constants for the component.
- File `Dockerfile`: defines the recipe to construct the Docker image for the component.
- File `requirements.in`: defines the Python dependencies that are required by this component.
You can automate the creation of this file structure by running the following command. In this example, we create the
`forecaster` component.

```bash
cd ~/tfs-ctrl
scripts/create_component.sh forecaster
```
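As a reference, after running the script, the skeleton of the new component should look roughly as follows (a sketch
derived from the folders and files described above; the exact listing may differ):

```bash
find src/forecaster
# Expected output (order may vary):
# src/forecaster
# src/forecaster/.gitlab-ci.yml
# src/forecaster/__init__.py
# src/forecaster/Config.py
# src/forecaster/Dockerfile
# src/forecaster/requirements.in
# src/forecaster/client
# src/forecaster/client/__init__.py
# src/forecaster/service
# src/forecaster/service/__init__.py
# src/forecaster/tests
# src/forecaster/tests/__init__.py
```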
## 3.2.3. gRPC Proto messages and services
The components, e.g., microservices, of the TeraFlowSDN controller, in general, use a gRPC-based open API to
interoperate. All the protocol definitions can be found in sub-folder `proto` within the root project folder.
For additional details on gRPC, visit the official [gRPC](https://grpc.io/) web page.

In general, each component has an associated _proto_ file named after the component in snake_case, e.g.,
`forecaster.proto`. For instance, the _proto_ file for the `forecaster` component being developed in this tutorial is
`proto/forecaster.proto` and defines 3 RPC methods:

- `rpc GetForecastOfTopology (context.TopologyId) returns (Forecast) {}`:
  Takes a topology identifier as parameter, and computes the aggregated forecast for the topology.
- `rpc GetForecastOfLink (context.LinkId) returns (Forecast) {}`:
  Takes a link identifier as parameter, and computes the aggregated forecast for that link.
- `rpc CheckService (context.ServiceId) returns (ForecastPrediction) {}`:
  Takes a service identifier as parameter, computes the forecast for the connections of that service, and returns a
  value indicating whether the resources can support the demand.
## 3.2.4. Create the component client
Each component has, by default, a pre-defined client that other components can import to inter-communicate. The client
module, by default, is named after the component's name concatenated with `Client`, written in CamelCase. For instance,
the client for the `forecaster` component would be `ForecasterClient.py`.

This file contains a class with the same name as the file, e.g., `ForecasterClient`, and implements 3 main methods,
plus one method for each RPC method supported by the service. These methods are:

- Main methods:
  - `__init__(host=None, port=None)`: constructor of the client class.
  - `connect(self)`: triggers the connection of the client to the service pointed to by the host and port class
    parameters.
  - `close(self)`: disconnects the client from the service.
- RPC methods: one for each RPC method defined in the associated service within the proto file, e.g.,
  `proto/forecaster.proto`.
Create file `src/forecaster/client/ForecasterClient.py` with the class and methods described above.
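A minimal sketch of such a client is shown below. It assumes the stubs generated from `proto/forecaster.proto` by
`./proto/generate_code_python.sh` expose a `ForecasterServiceStub`; the generated module path, the stub name, and the
default port are illustrative assumptions, not the actual TeraFlowSDN code.

```python
import grpc

from forecaster_pb2_grpc import ForecasterServiceStub  # assumed generated module/stub names


class ForecasterClient:
    def __init__(self, host=None, port=None):
        # Resolve the service endpoint; the defaults below are placeholders,
        # the actual values should come from the component's Config.py
        self.host = host or '127.0.0.1'
        self.port = port or 10040  # hypothetical default port
        self.endpoint = '{:s}:{:s}'.format(str(self.host), str(self.port))
        self.channel = None
        self.stub = None
        self.connect()

    def connect(self):
        # Open the gRPC channel towards the service and instantiate the stub
        self.channel = grpc.insecure_channel(self.endpoint)
        self.stub = ForecasterServiceStub(self.channel)

    def close(self):
        # Close the channel and drop the stub
        if self.channel is not None:
            self.channel.close()
        self.channel = None
        self.stub = None

    # One thin wrapper per RPC method defined in proto/forecaster.proto
    def GetForecastOfTopology(self, request):  # request: context.TopologyId
        return self.stub.GetForecastOfTopology(request)

    def GetForecastOfLink(self, request):  # request: context.LinkId
        return self.stub.GetForecastOfLink(request)

    def CheckService(self, request):  # request: context.ServiceId
        return self.stub.CheckService(request)
```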
## 3.1.3. Connect VSCode to the VM through "Remote SSH" extension

- Right-click on "TFS-VM"
- Select "Connect to Host in Current Window"
- Reply to the questions asked:
  - Platform of the remote host "TFS-VM": Linux
  - "TFS-VM" has fingerprint "<fingerprint>". Do you want to continue?: Continue
  - Type tfs user's password: tfs123
- You should now be connected to the TFS-VM.

__Note__: if you get a connection error message, the reason might be a wrong SSH server fingerprint. Edit file
"<...>/.ssh/known_hosts" on your local user account, check if there is a line starting with
"[127.0.0.1]:2200" (assuming the previous port forwarding configuration), remove the entire line, save the file,
and retry the connection.
## 3.1.4. Add SSH key to prevent typing the password every time

This step creates an SSH key in the VM and configures VSCode to use it, preventing having to type the password every
time.
- In VSCode (connected to the VM), click menu "Terminal > New Terminal"
- Run the following commands on the VM's terminal through VSCode
```bash
ssh-keygen -t rsa -b 4096 -f ~/.ssh/tfs-vm.key
# leave password empty
ssh-copy-id -i ~/.ssh/tfs-vm.key.pub tfs@10.0.2.10
# tfs@10.0.2.10's password: <type tfs user's password: tfs123>
rm .ssh/known_hosts
```
- In VSCode, click left "Explorer" panel to expand, if not expanded, and click "Open Folder" button.
- Choose "/home/tfs/"
- Type tfs user's password when asked
- Trust authors of the "/home/tfs [SSH: TFS-VM]" folder when asked
- Right click on the file "tfs-vm.key" in the file explorer
- Select "Download..." option
- Download the file into your user account's ".ssh" folder
- Delete files "tfs-vm.key" and "tfs-vm.key.pub" on the TFS-VM.
- In VSCode, click left "Remote Explorer" panel to expand
- Click the "gear" icon next to "SSH TARGETS" on top of "Remote Explorer" bar
- Choose to edit "<...>/.ssh/config" file (or equivalent)
- Find entry "Host TFS-VM" and update it as follows:
```
Host TFS-VM
HostName 127.0.0.1
Port 2200
ForwardX11 no
User tfs
IdentityFile "<path to the downloaded identity private key file>"
```
- Save the file
- From now on, VSCode will use the identity file to connect to the TFS-VM instead of the user's password.
## 3.1.5. Install VSCode Python Extension (in VSCode server)
This step installs Python extensions in VSCode server running in the VM.
- In VSCode (connected to the VM), click left button "Extensions"
- Search "Python" extension in the extension Marketplace.
- Install official "Python" extension released by Microsoft.
- By default, since you're connected to the VM, it will be installed in the VSCode server running in the VM.
- In VSCode (connected to the VM), click left button "Explorer"
- Click "Ctrl+Alt+P" and type "Python: Select Interpreter". Select option "Python: 3.9.13 64-bit ('tfs')"