diff --git a/hackfest3 b/hackfest3 new file mode 120000 index 0000000000000000000000000000000000000000..2816e4af9a9b4f4b06651710e87d93f4d5db1f0b --- /dev/null +++ b/hackfest3 @@ -0,0 +1 @@ +src/tests/hackfest3/ \ No newline at end of file diff --git a/src/tests/hackfest3/README.md b/src/tests/hackfest3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b9debfd9b054dc6f9173551909b0035c37aaf3b3 --- /dev/null +++ b/src/tests/hackfest3/README.md @@ -0,0 +1,41 @@ +# Tests for P4 functionality of TeraFlowSDN + +This directory contains the necessary scripts and configurations to run tests for the P4 functionality of TFS. + +## Basic scripts + +To run the experiments, use the five scripts in the following order: +``` +setup.sh +run_test_01_bootstrap.sh +run_test_02_create_service.sh +run_test_03_delete_service.sh +run_test_04_cleanup.sh +``` + +The setup script copies the necessary artifacts to the SBI service pod. It should be run just once, after a fresh install of TFS. +The bootstrap script registers the context, topology, links, and devices to TFS. +The create service script establishes a service between two endpoints. +The delete service script deletes the aforementioned service. +The cleanup script deletes all the objects (context, topology, links, devices) from TFS. + +## Objects file + +The above bash scripts make use of the corresponding Python scripts found under the `./tests/` directory. +Most important is the `./tests/Objects.py` file, which contains the definitions of the Context, Topology, Devices, Links, and Services. **This is the file that needs changes in case of a new topology.** + +Check the `./tests/Objects.py` file before running the experiment to make sure that the switch details are correct (IP address, port, etc.). + +## Mininet topologies + +In the `./mininet/` directory there are different Mininet topology examples. The current `./tests/Objects.py` file corresponds to the `./mininet/4switch2path.py` topology. For more topologies, please refer to `../p4`. + +## P4 artifacts + +The `./p4/` directory holds the compiled P4 artifacts containing the pipeline that will be pushed to the P4 switch, along with the P4Runtime definitions. +The `./setup.sh` script copies from this directory, so if you need to change the P4 program, make sure to put the compiled artifacts here. + +## Latency probe + +The `./probe/` directory contains a small program that measures the latency between two hosts in Mininet and sends the measurements to the Monitoring component. For specific instructions, refer to the corresponding `./probe/README.md` file. + diff --git a/src/tests/hackfest3/__init__.py b/src/tests/hackfest3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612 --- /dev/null +++ b/src/tests/hackfest3/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
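The README above singles out `./tests/Objects.py` as the one file to edit for a new topology. Purely as a hypothetical sketch (the variable name, dictionary shape, and values below are illustrative; the real file builds gRPC Context/Topology/Device objects through TFS object-factory helpers), the information each switch entry has to pin down is:
```
# Hypothetical sketch of the per-switch data defined in ./tests/Objects.py.
DEVICE_SW1 = {
    'uuid'      : 'sw1',           # device name used by TFS
    'address'   : '127.0.0.1',     # management IP of the BMv2 switch -- verify before running
    'port'      : 50001,           # P4Runtime gRPC port -- verify before running
    'endpoints' : ['1', '2', '3'], # switch ports exposed as TFS endpoints
}
```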
+ diff --git a/src/tests/hackfest3/commands.txt b/src/tests/hackfest3/commands.txt new file mode 100644 index 0000000000000000000000000000000000000000..0db6497122c4ac3f7de52057cd41e257f4cb37af --- /dev/null +++ b/src/tests/hackfest3/commands.txt @@ -0,0 +1,111 @@ +################## TFS deployment and setup ####################### + +###### Deployment +# To deploy TFS: +cd ~/controller +source my_deploy.sh +./deploy.sh + +###### Experiments +# To onboard the experiment +cd ~/controller + +# Setup the compiled files +./src/tests/hackfest3/setup.sh + +# Register the devices +./src/tests/hackfest3/run_test_01_bootstrap.sh + +# Create the service +./src/tests/hackfest3/run_test_02_create_service.sh + +# Delete the service +./src/tests/hackfest3/run_test_03_delete_service.sh + +# Cleanup TFS +./src/tests/hackfest3/run_test_04_cleanup.sh + + +########################## MININET ################## +# To get the mininet cli +cd ~/ngsdn-tutorial +make mn-cli + +#### screen +# To open a screen in mininet + # For client + MN: client screen -S <name> + + # For server + MN: server screen -S <name> + +# To get out (detach) +Press <ctrl+a> + d + +# To reconnect +MN: client screen -r <name> + + +####################### PROBE ###################### +### old probe +# agent +source tfs_runtime_env_vars.sh + +cd ~/controller/src/tests/hackfest3/probe/probe-tfs + +./deploy.sh +./connect_to_mininet.sh +./tfsagent + +# pinger +# In mininet +cd ~/ngsdn-tutorial +make mn-cli +MN: client ./tfsping + + +### new probe +cd ~/controller/src/tests/hackfest3/new-probe +./copy.sh + +# agent: +# import PYTHONPATH from the TFS environment variables +source tfs_runtime_env_vars.sh +python agent.py + +# pinger +# In mininet +cd ~/ngsdn-tutorial +make mn-cli +MN: client python ping2.py 10.0.0.2 + +# To introduce delay or packet loss +cd ~/controller/src/tests/hackfest3/new-probe +./connect_to_mininet.sh +tc qdisc add dev <interface> root netem delay <time>ms +tc qdisc add dev <interface> root netem loss <percentage>% + +################ INT (interactive session 2) ########### +# build the new code +cd ~/controller/src/tests/hackfest3/int +./build_p4.sh <program.p4> + +# copy the receiver, sender and helper script to the docker container +cd ~/controller/src/tests/hackfest3/int +./copy_int_helpers.sh +./connect_to_mininet.sh +./install-scapy.sh + +# Run the receiver in the server screen +cd ~/ngsdn-tutorial +make mn-cli +MN: server screen -S rec + MN/Screen: python receive.py + <ctrl+a> + d + +# Run the sender in the client +MN: client python send.py 10.0.0.1 "test" 1 + +# Check the output in the receiver +MN: server screen -r rec + <ctrl+a> + d diff --git a/src/tests/hackfest3/deploy_specs.sh b/src/tests/hackfest3/deploy_specs.sh new file mode 100755 index 0000000000000000000000000000000000000000..d3f2b5566ec47e58bad906ffa465d22842de2776 --- /dev/null +++ b/src/tests/hackfest3/deploy_specs.sh @@ -0,0 +1,150 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
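The new-probe workflow above (an `agent.py` feeding Monitoring, plus `ping2.py` measuring from a Mininet host) boils down to timestamped round trips. A minimal sketch of such a pinger, assuming a peer that simply echoes UDP datagrams back (the port and payload here are illustrative, not the real probe's):
```
#!/usr/bin/env python3
# Minimal UDP round-trip-time probe, in the spirit of ping2.py.
# Assumes the target host echoes datagrams back on PORT (hypothetical setup).
import socket
import sys
import time

PORT = 4321        # illustrative port, not necessarily the real probe's
TIMEOUT_S = 1.0

def rtt_ms(dst):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.settimeout(TIMEOUT_S)
    t0 = time.monotonic()
    s.sendto(b'probe', (dst, PORT))
    s.recvfrom(1024)                         # block until the echo returns
    return (time.monotonic() - t0) * 1000.0

if __name__ == '__main__':
    print('%.3f ms' % rtt_ms(sys.argv[1]))   # e.g. python ping2.py 10.0.0.2
```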
+ + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, that you want to build images for and deploy. +export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator" + +# Uncomment to activate Monitoring +export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Policy Manager +export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Automation Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} automation" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroachDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroachDB PostgreSQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set the database name to be used by Context. +export CRDB_DATABASE="tfs" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Enable flag for dropping the database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Enable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="YES" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Enable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="YES" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB PostgreSQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Enable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="YES" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/hackfest3/grafana/dashboard.json b/src/tests/hackfest3/grafana/dashboard.json new file mode 100644 index 0000000000000000000000000000000000000000..1c464cecf5304e56c2fd871f99e3e44457fb919a --- /dev/null +++ b/src/tests/hackfest3/grafana/dashboard.json @@ -0,0 +1,276 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 11, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "postgres", + "uid": "WdiCIPwVk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "State" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "ACTIVE": { + "color": "green", + "index": 2 + }, + "ENFORCED": { + "color": "red", + "index": 3 + }, + "PROVISIONED": { + "color": "yellow", + "index": 1 + }, + "VALIDATED": { + "color": "blue", + "index": 0 + } + }, + "type": "value" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 4, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^State$/", + "values": true + }, + "textMode": "auto" + }, + "pluginVersion": "8.5.22", + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "WdiCIPwVk" + }, + "format": "time_series", + "group": [], + "metricColumn": "none", + "rawQuery": true, + "rawSql": "SELECT\n updated_at as \"time\",\n policyrule_state as \"State\"\nFROM policyrule\nORDER BY 1", + "refId": "A", + "select": [ + [ + { + "params": [ + "policyrule_state" + ], + "type": "column" + } + ] + ], + "table": "policyrule", + "timeColumn": "updated_at", + "timeColumnType": "timestamp", +
"where": [ + { + "name": "$__timeFilter", + "params": [], + "type": "macro" + } + ] + } + ], + "title": "Policy State", + "type": "stat" + }, + { + "datasource": { + "type": "postgres", + "uid": "nSmCIEw4k" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "latency", + "axisPlacement": "right", + "axisSoftMin": -1, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line+area" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 10000 + } + ] + }, + "unit": "µs" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "nSmCIEw4k" + }, + "format": "time_series", + "group": [], + "metricColumn": "none", + "rawQuery": true, + "rawSql": "SELECT\n timestamp AS \"time\",\n kpi_value AS metric\nFROM tfs_monitoring_kpis\nWHERE\n $__timeFilter(timestamp)\nORDER BY timestamp", + "refId": "A", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "column" + } + ] + ], + "timeColumn": "time", + "where": [ + { + "name": "$__timeFilter", + "params": [], + "type": "macro" + } + ] + } + ], + "title": "end-to-end latency", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "HPSR23", + "uid": "F-t42xU4z", + "version": 1, + "weekStart": "" +} diff --git a/src/tests/hackfest3/grafana/only_metrics.json b/src/tests/hackfest3/grafana/only_metrics.json new file mode 100644 index 0000000000000000000000000000000000000000..966fb738e8292dc7ce6b873a8b0c206ad8c95382 --- /dev/null +++ b/src/tests/hackfest3/grafana/only_metrics.json @@ -0,0 +1,157 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 11, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "postgres", + "uid": "3xPv3eMIk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": 
{ + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "3xPv3eMIk" + }, + "format": "time_series", + "group": [], + "metricColumn": "none", + "rawQuery": true, + "rawSql": "SELECT\n timestamp AS \"time\",\n kpi_value AS metric\nFROM tfs_monitoring_kpis\nWHERE\n $__timeFilter(timestamp)\nORDER BY timestamp", + "refId": "A", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "column" + } + ] + ], + "timeColumn": "time", + "where": [ + { + "name": "$__timeFilter", + "params": [], + "type": "macro" + } + ] + } + ], + "title": "Panel Title", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "hackfest3", + "uid": "SNg63eGSk", + "version": 1, + "weekStart": "" +} diff --git a/src/tests/hackfest3/grpc/addPolicy.sh b/src/tests/hackfest3/grpc/addPolicy.sh new file mode 100755 index 0000000000000000000000000000000000000000..71300d3ac00ff99786947cdc56b3776f858fcd3e --- /dev/null +++ b/src/tests/hackfest3/grpc/addPolicy.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./grpcurl/grpcurl -plaintext -d @ localhost:30060 policy.PolicyService/PolicyAddService < policyAddService.json diff --git a/src/tests/hackfest3/grpc/grpcurl/LICENSE b/src/tests/hackfest3/grpc/grpcurl/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6b678c507101c0682cfcd340bc97522ccabe7e4d --- /dev/null +++ b/src/tests/hackfest3/grpc/grpcurl/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 FullStory, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/src/tests/hackfest3/grpc/grpcurl/grpcurl b/src/tests/hackfest3/grpc/grpcurl/grpcurl new file mode 100755 index 0000000000000000000000000000000000000000..89a7bd8bfea5ba8d3baa8c636e779a97c0e47448 Binary files /dev/null and b/src/tests/hackfest3/grpc/grpcurl/grpcurl differ diff --git a/src/tests/hackfest3/grpc/policyAddService.json b/src/tests/hackfest3/grpc/policyAddService.json new file mode 100644 index 0000000000000000000000000000000000000000..5cada37425fce4851303858281032373d558dd68 --- /dev/null +++ b/src/tests/hackfest3/grpc/policyAddService.json @@ -0,0 +1,48 @@ +{ + "serviceId": { + "context_id": { + "context_uuid": { + "uuid": "43813baf-195e-5da6-af20-b3d0922e71a7" + } + }, + "service_uuid": { + "uuid": "c7d6c3f4-395e-5973-98d3-8d90f8fc9141" + } + }, + "policyRuleBasic": { + "priority": 0, + "policyRuleId": { + "uuid": { + "uuid": "1" + } + }, + "booleanOperator": "POLICYRULE_CONDITION_BOOLEAN_OR", + "policyRuleState": { + "policyRuleStateMessage": "" + }, + "actionList": [ + { + "action": "POLICY_RULE_ACTION_RECALCULATE_PATH", + "action_config": [ + { + "action_key": "", + "action_value": "" + } + ] + } + ], + "conditionList": [ + { + "numericalOperator": "POLICYRULE_CONDITION_NUMERICAL_GREATER_THAN", + "kpiValue": { + "floatVal": 10000 + }, + "kpiId": { + "kpi_id": { + "uuid": "1" + } + } + } + ] + } +} diff --git a/src/tests/hackfest3/grpc/removePolicy.sh b/src/tests/hackfest3/grpc/removePolicy.sh new file mode 100755 index 0000000000000000000000000000000000000000..fc6103f41224915abdae1c6688b1284d3e5f21ae --- /dev/null +++ b/src/tests/hackfest3/grpc/removePolicy.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
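+# Note: grpcurl's '-d @' flag reads the request body from stdin, which is why +# the JSON file (carrying just the UUID of the rule to delete) is piped in below.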
+ +# Remove policy +./grpcurl/grpcurl -plaintext -d @ localhost:30060 policy.PolicyService/PolicyDelete < removePolicyRule.json diff --git a/src/tests/hackfest3/grpc/removePolicyRule.json b/src/tests/hackfest3/grpc/removePolicyRule.json new file mode 100644 index 0000000000000000000000000000000000000000..2abe6aeb26a9ce1d278dedec8a8bb0f383bb23f8 --- /dev/null +++ b/src/tests/hackfest3/grpc/removePolicyRule.json @@ -0,0 +1,5 @@ +{ + "uuid": { + "uuid": "c4b5e66e-fa99-5075-9b6e-760476791fc1" + } +} diff --git a/src/tests/hackfest3/int/build_p4.sh b/src/tests/hackfest3/int/build_p4.sh new file mode 100755 index 0000000000000000000000000000000000000000..184fe17ec6271ef689f2aa2b280c8e9b1b8c6ee2 --- /dev/null +++ b/src/tests/hackfest3/int/build_p4.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Abort early if no P4 program was given as the first argument. +[[ -z "$1" ]] && { echo "Usage: $0 <program.p4>"; exit 1; } + +get_next_backup_dir() { + local prefix="/home/teraflow/controller/src/tests/hackfest3/p4/backup" + local num=1 + + while [[ -d "$prefix$num" ]]; do + ((num++)) + done + + echo "$prefix$num" +} + +backup_dir=$(get_next_backup_dir) +mkdir "$backup_dir" + +if [[ -d "$backup_dir" ]]; then + mv ~/controller/src/tests/hackfest3/p4/*json "$backup_dir" + mv ~/controller/src/tests/hackfest3/p4/*p4 "$backup_dir" + mv ~/controller/src/tests/hackfest3/p4/*txt "$backup_dir" +else + echo "Backup directory not created. Files were not moved." +fi + +cp "$1" ~/controller/src/tests/hackfest3/p4/ + +rm -rf ~/ngsdn-tutorial/p4src/* +cp "$1" ~/ngsdn-tutorial/p4src/main.p4 +cd ~/ngsdn-tutorial +make p4-build + +cp ~/ngsdn-tutorial/p4src/build/* ~/controller/src/tests/hackfest3/p4/ diff --git a/src/tests/hackfest3/int/connect_to_mininet.sh b/src/tests/hackfest3/int/connect_to_mininet.sh new file mode 100755 index 0000000000000000000000000000000000000000..a82d3767fc2669e7627bee0b5ca60e5626c920f2 --- /dev/null +++ b/src/tests/hackfest3/int/connect_to_mininet.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +CONTAINER=`docker ps | grep mininet | cut -f1 -d" "` +docker exec -it $CONTAINER /bin/bash diff --git a/src/tests/hackfest3/int/copy_int_helpers.sh b/src/tests/hackfest3/int/copy_int_helpers.sh new file mode 100755 index 0000000000000000000000000000000000000000..726ff55dfb512a34819063bd0bddddb5ad076ce3 --- /dev/null +++ b/src/tests/hackfest3/int/copy_int_helpers.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# get container id +CONTAINER=`docker ps | grep mininet | cut -f1 -d" "` +docker cp send.py $CONTAINER:/root +docker cp receive.py $CONTAINER:/root +docker cp install-scapy.sh $CONTAINER:/root diff --git a/src/tests/hackfest3/int/install-scapy.sh b/src/tests/hackfest3/int/install-scapy.sh new file mode 100755 index 0000000000000000000000000000000000000000..9cfa948f682400032af18d1595a3a79009dc3c49 --- /dev/null +++ b/src/tests/hackfest3/int/install-scapy.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +sed -i 's/deb.debian.org/archive.debian.org/g' /etc/apt/sources.list +sed -i 's|security.debian.org|archive.debian.org/debian-security/|g' /etc/apt/sources.list +sed -i '/stretch-updates/d' /etc/apt/sources.list +chmod 1777 /tmp +apt update +apt install -y python-scapy diff --git a/src/tests/hackfest3/int/qdepth_int_basic.p4 b/src/tests/hackfest3/int/qdepth_int_basic.p4 new file mode 100644 index 0000000000000000000000000000000000000000..6bef091b96f4a4b59b50a3d97224e003abe2acf0 --- /dev/null +++ b/src/tests/hackfest3/int/qdepth_int_basic.p4 @@ -0,0 +1,278 @@ +/* + * Copyright 2019-present Open Networking Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#include <core.p4> +#include <v1model.p4> + +typedef bit<9> port_num_t; +typedef bit<48> mac_addr_t; + +//------------------------------------------------------------------------------ +// HEADER DEFINITIONS +//------------------------------------------------------------------------------ + +#define MAX_INT_HEADERS 9 + +const bit<16> TYPE_IPV4 = 0x800; +const bit<5> IPV4_OPTION_INT = 31; + +typedef bit<9> egressSpec_t; +typedef bit<48> macAddr_t; +typedef bit<32> ip4Addr_t; + +typedef bit<13> switch_id_t; +typedef bit<13> queue_depth_t; +typedef bit<6> output_port_t; + +header ethernet_t { + macAddr_t dstAddr; + macAddr_t srcAddr; + bit<16> etherType; +} + +header ipv4_t { + bit<4> version; + bit<4> ihl; + bit<6> dscp; + bit<2> ecn; + bit<16> totalLen; + bit<16> identification; + bit<3> flags; + bit<13> fragOffset; + bit<8> ttl; + bit<8> protocol; + bit<16> hdrChecksum; + ip4Addr_t srcAddr; + ip4Addr_t dstAddr; +} + +header ipv4_option_t { + bit<1> copyFlag; + bit<2> optClass; + bit<5> option; + bit<8> optionLength; +} + +header int_count_t { + bit<16> num_switches; +} + +header int_header_t { + switch_id_t switch_id; + queue_depth_t queue_depth; + output_port_t output_port; +} + + +struct parser_metadata_t { + bit<16> num_headers_remaining; +} + +struct local_metadata_t { + parser_metadata_t parser_metadata; +} + +struct parsed_headers_t { + ethernet_t ethernet; + ipv4_t ipv4; + ipv4_option_t ipv4_option; + int_count_t int_count; + int_header_t[MAX_INT_HEADERS] int_headers; +} + +error { IPHeaderWithoutOptions } + +//------------------------------------------------------------------------------ +// INGRESS PIPELINE +//------------------------------------------------------------------------------ + +parser ParserImpl(packet_in packet, + out parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + state start { + + packet.extract(hdr.ethernet); + transition select(hdr.ethernet.etherType){ + TYPE_IPV4: parse_ipv4; + default: accept; + } + } + + state parse_ipv4 { + packet.extract(hdr.ipv4); + //Check that ihl is at least 5. Packets without IP options set ihl to 5.
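+ // ihl counts 32-bit words: ihl == 5 is a bare 20-byte IPv4 header, while any + // larger value means IPv4 options (such as the INT stack) follow.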
+ verify(hdr.ipv4.ihl >= 5, error.IPHeaderWithoutOptions); + transition select(hdr.ipv4.ihl) { + 5 : accept; + default : parse_ipv4_option; + } + } + + state parse_ipv4_option { + packet.extract(hdr.ipv4_option); + transition select(hdr.ipv4_option.option){ + + IPV4_OPTION_INT: parse_int; + default: accept; + + } + } + + state parse_int { + packet.extract(hdr.int_count); + local_metadata.parser_metadata.num_headers_remaining = hdr.int_count.num_switches; + transition select(local_metadata.parser_metadata.num_headers_remaining){ + 0: accept; + default: parse_int_headers; + } + } + + state parse_int_headers { + packet.extract(hdr.int_headers.next); + local_metadata.parser_metadata.num_headers_remaining = local_metadata.parser_metadata.num_headers_remaining - 1; + transition select(local_metadata.parser_metadata.num_headers_remaining){ + 0: accept; + default: parse_int_headers; + } + } +} + +control VerifyChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t meta) +{ + apply { /* EMPTY */ } +} + + +control IngressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + action drop() { + mark_to_drop(standard_metadata); + } + + action set_egress_port(port_num_t port) { + standard_metadata.egress_spec = port; + } + + // --- l2_exact_table ------------------ + + table l2_exact_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + set_egress_port; + @defaultonly drop; + } + const default_action = drop; + } + + apply { + l2_exact_table.apply(); + } +} + +//------------------------------------------------------------------------------ +// EGRESS PIPELINE +//------------------------------------------------------------------------------ + +control EgressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + + action add_int_header(switch_id_t swid){ + //increase int stack counter by one + hdr.int_count.num_switches = hdr.int_count.num_switches + 1; + hdr.int_headers.push_front(1); + // This was not needed in older specs: by default, pushed elements + // are now invalid and must be set valid explicitly. + hdr.int_headers[0].setValid(); + hdr.int_headers[0].switch_id = (bit<13>)swid; + hdr.int_headers[0].queue_depth = (bit<13>)standard_metadata.deq_qdepth; + hdr.int_headers[0].output_port = (bit<6>)standard_metadata.egress_port; + + //update ip header length + hdr.ipv4.ihl = hdr.ipv4.ihl + 1; + hdr.ipv4.totalLen = hdr.ipv4.totalLen + 4; + hdr.ipv4_option.optionLength = hdr.ipv4_option.optionLength + 4; + } + + table int_table { + actions = { + add_int_header; + NoAction; + } + default_action = add_int_header(1); + } + + apply { + if (hdr.int_count.isValid()){ + int_table.apply(); + } + } +} + + +control ComputeChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t local_metadata) +{ + apply { + update_checksum( + hdr.ipv4.isValid(), + { hdr.ipv4.version, + hdr.ipv4.ihl, + hdr.ipv4.dscp, + hdr.ipv4.ecn, + hdr.ipv4.totalLen, + hdr.ipv4.identification, + hdr.ipv4.flags, + hdr.ipv4.fragOffset, + hdr.ipv4.ttl, + hdr.ipv4.protocol, + hdr.ipv4.srcAddr, + hdr.ipv4.dstAddr }, + hdr.ipv4.hdrChecksum, + HashAlgorithm.csum16); + } +} + +control DeparserImpl(packet_out packet, in parsed_headers_t hdr) { + apply { + + //parsed headers have to be added again into the packet.
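+ // emit() writes a header only if it is valid, so the fixed order below + // reproduces exactly the Ethernet/IPv4/option/INT stack that was parsed.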
+ packet.emit(hdr.ethernet); + packet.emit(hdr.ipv4); + packet.emit(hdr.ipv4_option); + packet.emit(hdr.int_count); + packet.emit(hdr.int_headers); + + } +} + +V1Switch( + ParserImpl(), + VerifyChecksumImpl(), + IngressPipeImpl(), + EgressPipeImpl(), + ComputeChecksumImpl(), + DeparserImpl() +) main; diff --git a/src/tests/hackfest3/int/receive.py b/src/tests/hackfest3/int/receive.py new file mode 100644 index 0000000000000000000000000000000000000000..fb1aac1ca2527e7df5981d2a747f2ebc4a6e8775 --- /dev/null +++ b/src/tests/hackfest3/int/receive.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import struct + +from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr +from scapy.all import Packet, IPOption +from scapy.all import PacketListField, ShortField, IntField, LongField, BitField, FieldListField, FieldLenField +from scapy.all import IP, UDP, Raw +from scapy.layers.inet import _IPOption_HDR + + +def get_if(): + ifs=get_if_list() + iface=None + for i in get_if_list(): + if "eth0" in i: + iface=i + break + if not iface: + print("Cannot find eth0 interface") + exit(1) + return iface + + +class SwitchTrace(Packet): + fields_desc = [ BitField("swid", 0, 13), + BitField("qdepth", 0,13), + BitField("portid",0,6)] + def extract_padding(self, p): + return "", p + + +class IPOption_INT(IPOption): + name = "INT" + option = 31 + fields_desc = [ _IPOption_HDR, + FieldLenField("length", None, fmt="B", + length_of="int_headers", + adjust=lambda pkt,l:l*2+4), + ShortField("count", 0), + PacketListField("int_headers", + [], + SwitchTrace, + count_from=lambda pkt:(pkt.count*1)) ] + + +def handle_pkt(pkt): + print("got a packet") + pkt.show2() + sys.stdout.flush() + + +def main(): + iface = 'server-eth0' + print("sniffing on %s" % iface) + sys.stdout.flush() + sniff(filter="udp and port 4321", iface = iface, + prn = lambda x: handle_pkt(x)) + + +if __name__ == '__main__': + main() diff --git a/src/tests/hackfest3/int/send.py b/src/tests/hackfest3/int/send.py new file mode 100644 index 0000000000000000000000000000000000000000..38b4b4d6274922e5917206681e9502c9d8ac3576 --- /dev/null +++ b/src/tests/hackfest3/int/send.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
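+# Layout note: each SwitchTrace entry defined below is one 32-bit word (13-bit +# switch id, 13-bit queue depth, 6-bit egress port), matching the 4 bytes per +# hop that the P4 pipeline adds to ihl/totalLen/optionLength.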
+ +import argparse +import sys +import socket +import random +import struct + +from scapy.all import sendp, send, hexdump, get_if_list, get_if_hwaddr +from scapy.all import Packet, IPOption +from scapy.all import Ether, IP, UDP +from scapy.all import IntField, FieldListField, FieldLenField, ShortField, PacketListField, BitField +from scapy.layers.inet import _IPOption_HDR + +from time import sleep + + +def get_if(): + ifs=get_if_list() + iface=None + for i in get_if_list(): + if "eth0" in i: + iface=i + break + if not iface: + print("Cannot find eth0 interface") + exit(1) + return iface + + +class SwitchTrace(Packet): + fields_desc = [ BitField("swid", 0, 13), + BitField("qdepth", 0,13), + BitField("portid",0,6)] + def extract_padding(self, p): + return "", p + +class IPOption_INT(IPOption): + name = "INT" + option = 31 + fields_desc = [ _IPOption_HDR, + FieldLenField("length", None, fmt="B", + length_of="int_headers", + adjust=lambda pkt,l:l*2+4), + ShortField("count", 0), + PacketListField("int_headers", + [], + SwitchTrace, + count_from=lambda pkt:(pkt.count*1)) ] + + +def main(): + + if len(sys.argv)<4: + print('pass 3 arguments: <destination> "<message>" <count>') + exit(1) + + addr = socket.gethostbyname(sys.argv[1]) + iface = get_if() + + pkt = Ether(src=get_if_hwaddr(iface), dst="ff:ff:ff:ff:ff:ff") / IP( + dst=addr, options = IPOption_INT(count=0, + int_headers=[])) / UDP( + dport=1234, sport=4321) / sys.argv[2] + + pkt.show2() + + try: + for i in range(int(sys.argv[3])): + sendp(pkt, iface=iface) + sleep(1) + except KeyboardInterrupt: + raise + + +if __name__ == '__main__': + main() diff --git a/src/tests/hackfest3/int/solution/p4_service_handler.py b/src/tests/hackfest3/int/solution/p4_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..558f6a590620ec96e4dd3db88599acd037041268 --- /dev/null +++ b/src/tests/hackfest3/int/solution/p4_service_handler.py @@ -0,0 +1,389 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4 service handler for the TeraFlowSDN controller.
+""" + +import logging +from typing import Any, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigRule, DeviceId, Service +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4'}) + +def create_rule_set(endpoint_a, endpoint_b): + return json_config_rule_set( + 'table', + { + 'table-name': 'IngressPipeImpl.l2_exact_table', + 'match-fields': [ + { + 'match-field': 'standard_metadata.ingress_port', + 'match-value': endpoint_a + } + ], + 'action-name': 'IngressPipeImpl.set_egress_port', + 'action-params': [ + { + 'action-param': 'port', + 'action-value': endpoint_b + } + ] + } + ) + +def create_rule_del(endpoint_a, endpoint_b): + return json_config_rule_delete( + 'table', + { + 'table-name': 'IngressPipeImpl.l2_exact_table', + 'match-fields': [ + { + 'match-field': 'standard_metadata.ingress_port', + 'match-value': endpoint_a + } + ], + 'action-name': 'IngressPipeImpl.set_egress_port', + 'action-params': [ + { + 'action-param': 'port', + 'action-value': endpoint_b + } + ] + } + ) + +def create_int_set(endpoint_a, id): + return json_config_rule_set( + 'table', + { + 'table-name': 'EgressPipeImpl.int_table', + 'match-fields': [ + { + 'match-field': 'standard_metadata.ingress_port', + 'match-value': endpoint_a + } + ], + 'action-name': 'EgressPipeImpl.add_int_header', + 'action-params': [ + { + 'action-param': 'swid', + 'action-value': id + } + ] + } + ) + +def create_int_del(endpoint_a, id): + return json_config_rule_delete( + 'table', + { + 'table-name': 'EgressPipeImpl.int_table', + 'match-fields': [ + { + 'match-field': 'standard_metadata.ingress_port', + 'match-value': endpoint_a + } + ], + 'action-name': 'EgressPipeImpl.add_int_header', + 'action-params': [ + { + 'action-param': 'swid', + 'action-value': id + } + ] + } + ) + +def find_names(uuid_a, uuid_b, device_endpoints): + endpoint_a, endpoint_b = None, None + for endpoint in device_endpoints: + if endpoint.endpoint_id.endpoint_uuid.uuid == uuid_a: + endpoint_a = endpoint.name + elif endpoint.endpoint_id.endpoint_uuid.uuid == uuid_b: + endpoint_b = endpoint.name + + return (endpoint_a, endpoint_b) + +class P4ServiceHandler(_ServiceHandler): + def __init__(self, + service: Service, + task_executor : TaskExecutor, + **settings) -> None: + """ Initialize Driver. + Parameters: + service + The service instance (gRPC message) to be managed. + task_executor + An instance of Task Executor providing access to the + service handlers factory, the context and device clients, + and an internal cache of already-loaded gRPC entities. + **settings + Extra settings required by the service handler. + """ + self.__service = service + self.__task_executor = task_executor # pylint: disable=unused-private-member + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Create/Update service endpoints form a list. 
Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid and, optionally, the topology_uuid + of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint changes requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly added, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + service_uuid = self.__service.service_id.service_uuid.uuid + + history = {} + + results = [] + index = {} + i = 0 + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now + if device_uuid in history: + try: + matched_endpoint_uuid = history.pop(device_uuid) + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + + del device.device_config.config_rules[:] + + # Find names from uuids + (endpoint_a, endpoint_b) = find_names(matched_endpoint_uuid, endpoint_uuid, device.device_endpoints) + if endpoint_a is None: + LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid))) + raise Exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid))) + if endpoint_b is None: + LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid))) + raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid))) + + # One way + rule = create_rule_set(endpoint_a, endpoint_b) + device.device_config.config_rules.append(ConfigRule(**rule)) + # The other way + rule = create_rule_set(endpoint_b, endpoint_a) + device.device_config.config_rules.append(ConfigRule(**rule)) + + rule = create_int_set(endpoint_a, device.name[-1]) + device.device_config.config_rules.append(ConfigRule(**rule)) + + self.__task_executor.configure_device(device) + + results.append(True) + results[index[device_uuid]] = True + except Exception as e: + LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) + results.append(e) + else: + history[device_uuid] = endpoint_uuid + index[device_uuid] = i + results.append(False) + i = i+1 + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned.
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + service_uuid = self.__service.service_id.service_uuid.uuid + + history = {} + + results = [] + index = {} + i = 0 + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now + if device_uuid in history: + try: + matched_endpoint_uuid = history.pop(device_uuid) + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + + del device.device_config.config_rules[:] + + # Find names from uuids + (endpoint_a, endpoint_b) = find_names(matched_endpoint_uuid, endpoint_uuid, device.device_endpoints) + if endpoint_a is None: + LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid))) + raise Exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid))) + if endpoint_b is None: + LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid))) + raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid))) + + # One way + rule = create_rule_del(endpoint_a, endpoint_b) + device.device_config.config_rules.append(ConfigRule(**rule)) + # The other way + rule = create_rule_del(endpoint_b, endpoint_a) + device.device_config.config_rules.append(ConfigRule(**rule)) + + rule = create_int_del(endpoint_a, device.name[-1]) + device.device_config.config_rules.append(ConfigRule(**rule)) + + self.__task_executor.configure_device(device) + + results.append(True) + results[index[device_uuid]] = True + except Exception as e: + LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) + results.append(e) + else: + history[device_uuid] = endpoint_uuid + index[device_uuid] = i + results.append(False) + i = i+1 + + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. 
+ """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value + containing the new value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key changes requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value containing + possible additionally required values to locate the value + to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key deletions requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] \ No newline at end of file diff --git a/src/tests/hackfest3/int/solution/qdepth_int_basic.p4 b/src/tests/hackfest3/int/solution/qdepth_int_basic.p4 new file mode 100644 index 0000000000000000000000000000000000000000..4a4f56c255c31d11c505afd3cf3bf72211ba0317 --- /dev/null +++ b/src/tests/hackfest3/int/solution/qdepth_int_basic.p4 @@ -0,0 +1,281 @@ +/* + * Copyright 2019-present Open Networking Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#include <core.p4> +#include <v1model.p4> + +typedef bit<9> port_num_t; +typedef bit<48> mac_addr_t; + +//------------------------------------------------------------------------------ +// HEADER DEFINITIONS +//------------------------------------------------------------------------------ + +#define MAX_INT_HEADERS 9 + +const bit<16> TYPE_IPV4 = 0x800; +const bit<5> IPV4_OPTION_INT = 31; + +typedef bit<9> egressSpec_t; +typedef bit<48> macAddr_t; +typedef bit<32> ip4Addr_t; + +typedef bit<13> switch_id_t; +typedef bit<13> queue_depth_t; +typedef bit<6> output_port_t; + +header ethernet_t { + macAddr_t dstAddr; + macAddr_t srcAddr; + bit<16> etherType; +} + +header ipv4_t { + bit<4> version; + bit<4> ihl; + bit<6> dscp; + bit<2> ecn; + bit<16> totalLen; + bit<16> identification; + bit<3> flags; + bit<13> fragOffset; + bit<8> ttl; + bit<8> protocol; + bit<16> hdrChecksum; + ip4Addr_t srcAddr; + ip4Addr_t dstAddr; +} + +header ipv4_option_t { + bit<1> copyFlag; + bit<2> optClass; + bit<5> option; + bit<8> optionLength; +} + +header int_count_t { + bit<16> num_switches; +} + +header int_header_t { + switch_id_t switch_id; + queue_depth_t queue_depth; + output_port_t output_port; +} + + +struct parser_metadata_t { + bit<16> num_headers_remaining; +} + +struct local_metadata_t { + parser_metadata_t parser_metadata; +} + +struct parsed_headers_t { + ethernet_t ethernet; + ipv4_t ipv4; + ipv4_option_t ipv4_option; + int_count_t int_count; + int_header_t[MAX_INT_HEADERS] int_headers; +} + +error { IPHeaderWithoutOptions } + +//------------------------------------------------------------------------------ +// INGRESS PIPELINE +//------------------------------------------------------------------------------ + +parser ParserImpl(packet_in packet, + out parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + state start { + + packet.extract(hdr.ethernet); + transition select(hdr.ethernet.etherType){ + TYPE_IPV4: parse_ipv4; + default: accept; + } + } + + state parse_ipv4 { + packet.extract(hdr.ipv4); + //Check that ihl is at least 5. Packets without IP options set ihl to 5.
+ verify(hdr.ipv4.ihl >= 5, error.IPHeaderWithoutOptions); + transition select(hdr.ipv4.ihl) { + 5 : accept; + default : parse_ipv4_option; + } + } + + state parse_ipv4_option { + packet.extract(hdr.ipv4_option); + transition select(hdr.ipv4_option.option){ + + IPV4_OPTION_INT: parse_int; + default: accept; + + } + } + + state parse_int { + packet.extract(hdr.int_count); + local_metadata.parser_metadata.num_headers_remaining = hdr.int_count.num_switches; + transition select(local_metadata.parser_metadata.num_headers_remaining){ + 0: accept; + default: parse_int_headers; + } + } + + state parse_int_headers { + packet.extract(hdr.int_headers.next); + local_metadata.parser_metadata.num_headers_remaining = local_metadata.parser_metadata.num_headers_remaining -1 ; + transition select(local_metadata.parser_metadata.num_headers_remaining){ + 0: accept; + default: parse_int_headers; + } + } +} + +control VerifyChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t meta) +{ + apply { /* EMPTY */ } +} + + +control IngressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + action drop() { + mark_to_drop(standard_metadata); + } + + action set_egress_port(port_num_t port) { + standard_metadata.egress_spec = port; + } + + // --- l2_exact_table ------------------ + + table l2_exact_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + set_egress_port; + @defaultonly drop; + } + const default_action = drop; + } + + apply { + l2_exact_table.apply(); + } +} + +//------------------------------------------------------------------------------ +// EGRESS PIPELINE +//------------------------------------------------------------------------------ + +control EgressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + + action add_int_header(switch_id_t swid){ + //increase int stack counter by one + hdr.int_count.num_switches = hdr.int_count.num_switches + 1; + hdr.int_headers.push_front(1); + // This was not needed in older specs. Now by default pushed + // invalid elements are + hdr.int_headers[0].setValid(); + hdr.int_headers[0].switch_id = (bit<13>)swid; + hdr.int_headers[0].queue_depth = (bit<13>)standard_metadata.deq_qdepth; + hdr.int_headers[0].output_port = (bit<6>)standard_metadata.egress_port; + + //update ip header length + hdr.ipv4.ihl = hdr.ipv4.ihl + 1; + hdr.ipv4.totalLen = hdr.ipv4.totalLen + 4; + hdr.ipv4_option.optionLength = hdr.ipv4_option.optionLength + 4; + } + + table int_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + add_int_header; + NoAction; + } + default_action = NoAction; + } + + apply { + if (hdr.int_count.isValid()){ + int_table.apply(); + } + } +} + + +control ComputeChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t local_metadata) +{ + apply { + update_checksum( + hdr.ipv4.isValid(), + { hdr.ipv4.version, + hdr.ipv4.ihl, + hdr.ipv4.dscp, + hdr.ipv4.ecn, + hdr.ipv4.totalLen, + hdr.ipv4.identification, + hdr.ipv4.flags, + hdr.ipv4.fragOffset, + hdr.ipv4.ttl, + hdr.ipv4.protocol, + hdr.ipv4.srcAddr, + hdr.ipv4.dstAddr }, + hdr.ipv4.hdrChecksum, + HashAlgorithm.csum16); + } +} + +control DeparserImpl(packet_out packet, in parsed_headers_t hdr) { + apply { + + //parsed headers have to be added again into the packet. 
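+        // emit() serializes a header only when it is valid, so the emit order
+        // below must mirror the parse order, and int_headers entries beyond
+        // num_switches are simply skipped on the wire.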
+ packet.emit(hdr.ethernet); + packet.emit(hdr.ipv4); + packet.emit(hdr.ipv4_option); + packet.emit(hdr.int_count); + packet.emit(hdr.int_headers); + + } +} + +V1Switch( + ParserImpl(), + VerifyChecksumImpl(), + IngressPipeImpl(), + EgressPipeImpl(), + ComputeChecksumImpl(), + DeparserImpl() +) main; diff --git a/src/tests/hackfest3/int/solution/timestamp/receive2.py b/src/tests/hackfest3/int/solution/timestamp/receive2.py new file mode 100644 index 0000000000000000000000000000000000000000..0c749f9a48316e7826df409ff8852d40af0fb89e --- /dev/null +++ b/src/tests/hackfest3/int/solution/timestamp/receive2.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import struct + +from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr +from scapy.all import Packet, IPOption +from scapy.all import PacketListField, ShortField, IntField, LongField, BitField, FieldListField, FieldLenField +from scapy.all import IP, UDP, Raw +from scapy.layers.inet import _IPOption_HDR + + +def get_if(): + ifs=get_if_list() + iface=None + for i in get_if_list(): + if "eth0" in i: + iface=i + break + if not iface: + print("Cannot find eth0 interface") + exit(1) + return iface + + +class SwitchTrace(Packet): + fields_desc = [ BitField("timestamp", 0, 32)] + def extract_padding(self, p): + return "", p + + +class IPOption_INT(IPOption): + name = "INT" + option = 31 + fields_desc = [ _IPOption_HDR, + FieldLenField("length", None, fmt="B", + length_of="int_headers", + adjust=lambda pkt,l:l*2+4), + ShortField("count", 0), + PacketListField("int_headers", + [], + SwitchTrace, + count_from=lambda pkt:(pkt.count*1)) ] + + +def handle_pkt(pkt): + print("got a packet") + pkt.show2() + sys.stdout.flush() + + +def main(): + iface = 'server-eth0' + print("sniffing on %s" % iface) + sys.stdout.flush() + sniff(filter="udp and port 4321", iface = iface, + prn = lambda x: handle_pkt(x)) + + +if __name__ == '__main__': + main() diff --git a/src/tests/hackfest3/int/solution/timestamp/timestamp_int.p4 b/src/tests/hackfest3/int/solution/timestamp/timestamp_int.p4 new file mode 100644 index 0000000000000000000000000000000000000000..5a70ad3401d3e74afddad491e8560f76ae18af0f --- /dev/null +++ b/src/tests/hackfest3/int/solution/timestamp/timestamp_int.p4 @@ -0,0 +1,276 @@ +/* + * Copyright 2019-present Open Networking Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+#include <core.p4>
+#include <v1model.p4>
+
+typedef bit<9>  port_num_t;
+typedef bit<48> mac_addr_t;
+
+//------------------------------------------------------------------------------
+// HEADER DEFINITIONS
+//------------------------------------------------------------------------------
+
+#define MAX_INT_HEADERS 9
+
+const bit<16> TYPE_IPV4 = 0x800;
+const bit<5>  IPV4_OPTION_INT = 31;
+
+typedef bit<9>  egressSpec_t;
+typedef bit<48> macAddr_t;
+typedef bit<32> ip4Addr_t;
+
+typedef bit<13> switch_id_t;
+typedef bit<32> queue_depth_t;
+
+header ethernet_t {
+    macAddr_t dstAddr;
+    macAddr_t srcAddr;
+    bit<16>   etherType;
+}
+
+header ipv4_t {
+    bit<4>    version;
+    bit<4>    ihl;
+    bit<6>    dscp;
+    bit<2>    ecn;
+    bit<16>   totalLen;
+    bit<16>   identification;
+    bit<3>    flags;
+    bit<13>   fragOffset;
+    bit<8>    ttl;
+    bit<8>    protocol;
+    bit<16>   hdrChecksum;
+    ip4Addr_t srcAddr;
+    ip4Addr_t dstAddr;
+}
+
+header ipv4_option_t {
+    bit<1> copyFlag;
+    bit<2> optClass;
+    bit<5> option;
+    bit<8> optionLength;
+}
+
+header int_count_t {
+    bit<16> num_switches;
+}
+
+header int_header_t {
+    queue_depth_t timestamp;
+}
+
+
+struct parser_metadata_t {
+    bit<16> num_headers_remaining;
+}
+
+struct local_metadata_t {
+    parser_metadata_t parser_metadata;
+}
+
+struct parsed_headers_t {
+    ethernet_t ethernet;
+    ipv4_t ipv4;
+    ipv4_option_t ipv4_option;
+    int_count_t int_count;
+    int_header_t[MAX_INT_HEADERS] int_headers;
+}
+
+error { IPHeaderWithoutOptions }
+
+//------------------------------------------------------------------------------
+// INGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+parser ParserImpl(packet_in packet,
+                  out parsed_headers_t hdr,
+                  inout local_metadata_t local_metadata,
+                  inout standard_metadata_t standard_metadata) {
+
+    state start {
+
+        packet.extract(hdr.ethernet);
+        transition select(hdr.ethernet.etherType){
+            TYPE_IPV4: parse_ipv4;
+            default: accept;
+        }
+    }
+
+    state parse_ipv4 {
+        packet.extract(hdr.ipv4);
+        //Check if ihl is bigger than 5. Packets without ip options set ihl to 5.
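+        // As in the qdepth variant: ihl > 5 signals IPv4 options, which is
+        // where the INT option (31) and its timestamp stack would live.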
+ verify(hdr.ipv4.ihl >= 5, error.IPHeaderWithoutOptions); + transition select(hdr.ipv4.ihl) { + 5 : accept; + default : parse_ipv4_option; + } + } + + state parse_ipv4_option { + packet.extract(hdr.ipv4_option); + transition select(hdr.ipv4_option.option){ + + IPV4_OPTION_INT: parse_int; + default: accept; + + } + } + + state parse_int { + packet.extract(hdr.int_count); + local_metadata.parser_metadata.num_headers_remaining = hdr.int_count.num_switches; + transition select(local_metadata.parser_metadata.num_headers_remaining){ + 0: accept; + default: parse_int_headers; + } + } + + state parse_int_headers { + packet.extract(hdr.int_headers.next); + local_metadata.parser_metadata.num_headers_remaining = local_metadata.parser_metadata.num_headers_remaining -1 ; + transition select(local_metadata.parser_metadata.num_headers_remaining){ + 0: accept; + default: parse_int_headers; + } + } +} + +control VerifyChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t meta) +{ + apply { /* EMPTY */ } +} + + +control IngressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + action drop() { + mark_to_drop(standard_metadata); + } + + action set_egress_port(port_num_t port) { + standard_metadata.egress_spec = port; + } + + // --- l2_exact_table ------------------ + + table l2_exact_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + set_egress_port; + @defaultonly drop; + } + const default_action = drop; + } + + apply { + l2_exact_table.apply(); + } +} + +//------------------------------------------------------------------------------ +// EGRESS PIPELINE +//------------------------------------------------------------------------------ + +control EgressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + + action add_int_header(switch_id_t swid){ + //increase int stack counter by one + hdr.int_count.num_switches = hdr.int_count.num_switches + 1; + hdr.int_headers.push_front(1); + // This was not needed in older specs. Now by default pushed + // invalid elements are + hdr.int_headers[0].setValid(); + hdr.int_headers[0].timestamp = (bit<32>)standard_metadata.ingress_global_timestamp; + + //update ip header length + hdr.ipv4.ihl = hdr.ipv4.ihl + 1; + hdr.ipv4.totalLen = hdr.ipv4.totalLen + 4; + hdr.ipv4_option.optionLength = hdr.ipv4_option.optionLength + 4; + } + + table int_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + add_int_header; + NoAction; + } + default_action = NoAction; + } + + apply { + if (hdr.int_count.isValid()){ + int_table.apply(); + } + } +} + + +control ComputeChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t local_metadata) +{ + apply { + update_checksum( + hdr.ipv4.isValid(), + { hdr.ipv4.version, + hdr.ipv4.ihl, + hdr.ipv4.dscp, + hdr.ipv4.ecn, + hdr.ipv4.totalLen, + hdr.ipv4.identification, + hdr.ipv4.flags, + hdr.ipv4.fragOffset, + hdr.ipv4.ttl, + hdr.ipv4.protocol, + hdr.ipv4.srcAddr, + hdr.ipv4.dstAddr }, + hdr.ipv4.hdrChecksum, + HashAlgorithm.csum16); + } +} + +control DeparserImpl(packet_out packet, in parsed_headers_t hdr) { + apply { + + //parsed headers have to be added again into the packet. 
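+        // Only valid headers are emitted; int_headers entries that were never
+        // parsed or pushed do not appear in the serialized packet.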
+ packet.emit(hdr.ethernet); + packet.emit(hdr.ipv4); + packet.emit(hdr.ipv4_option); + packet.emit(hdr.int_count); + packet.emit(hdr.int_headers); + + } +} + +V1Switch( + ParserImpl(), + VerifyChecksumImpl(), + IngressPipeImpl(), + EgressPipeImpl(), + ComputeChecksumImpl(), + DeparserImpl() +) main; diff --git a/src/tests/hackfest3/mininet/4switch2path.py b/src/tests/hackfest3/mininet/4switch2path.py new file mode 100755 index 0000000000000000000000000000000000000000..d8ad04b0193a2b9b610a4d5f828891e575d8efe8 --- /dev/null +++ b/src/tests/hackfest3/mininet/4switch2path.py @@ -0,0 +1,110 @@ +#!/usr/bin/python + +# Copyright 2019-present Open Networking Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from mininet.cli import CLI +from mininet.log import setLogLevel +from mininet.net import Mininet +from mininet.node import Host +from mininet.topo import Topo +from stratum import StratumBmv2Switch + +CPU_PORT = 255 + +class IPv4Host(Host): + """Host that can be configured with an IPv4 gateway (default route). + """ + + def config(self, mac=None, ip=None, defaultRoute=None, lo='up', gw=None, + **_params): + super(IPv4Host, self).config(mac, ip, defaultRoute, lo, **_params) + self.cmd('ip -4 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -6 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -4 link set up %s' % self.defaultIntf()) + self.cmd('ip -4 addr add %s dev %s' % (ip, self.defaultIntf())) + if gw: + self.cmd('ip -4 route add default via %s' % gw) + # Disable offload + for attr in ["rx", "tx", "sg"]: + cmd = "/sbin/ethtool --offload %s %s off" % ( + self.defaultIntf(), attr) + self.cmd(cmd) + + def updateIP(): + return ip.split('/')[0] + + self.defaultIntf().updateIP = updateIP + +class TutorialTopo(Topo): + """Basic Server-Client topology with IPv4 hosts""" + + def __init__(self, *args, **kwargs): + Topo.__init__(self, *args, **kwargs) + + # Switches + # gRPC port 50001 + switch1 = self.addSwitch('switch1', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50002 + switch2 = self.addSwitch('switch2', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50003 + switch3 = self.addSwitch('switch3', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50004 + switch4 = self.addSwitch('switch4', cls=StratumBmv2Switch, cpuport=CPU_PORT) + + # Hosts + client = self.addHost('client', cls=IPv4Host, mac="aa:bb:cc:dd:ee:11", + ip='10.0.0.1/24', gw='10.0.0.100') + server = self.addHost('server', cls=IPv4Host, mac="aa:bb:cc:dd:ee:22", + ip='10.0.0.2/24', gw='10.0.0.100') + + # Switch links + self.addLink(switch1, switch2) # Switch1:port 1, Switch2:port 1 + self.addLink(switch1, switch3) # Switch1:port 2, Switch3:port 1 + self.addLink(switch2, switch4) # Switch2:port 2, Switch4:port 1 + self.addLink(switch3, switch4) # Switch3:port 2, Switch4:port 2 + + # Host links + self.addLink(client, switch1) # Switch 1: port 3 + self.addLink(server, switch4) # Switch 4: port 3 + +def main(): + net = Mininet(topo=TutorialTopo(), controller=None) + net.start() + + 
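+    # Static ARP entries: the P4 pipeline forwards purely on ingress port and
+    # has no ARP responder, so each host is seeded with the peer's MAC below.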
client = net.hosts[0]
+    client.setARP('10.0.0.2', 'aa:bb:cc:dd:ee:22')
+    server = net.hosts[1]
+    server.setARP('10.0.0.1', 'aa:bb:cc:dd:ee:11')
+
+    CLI(net)
+    net.stop()
+    print('#' * 80)
+    print('ATTENTION: Mininet was stopped! Perhaps accidentally?')
+    print('No worries, it will restart automatically in a few seconds...')
+    print('To access the Mininet CLI again, use `make mn-cli`')
+    print('To detach from the CLI (without stopping), press Ctrl-D')
+    print('To permanently quit Mininet, use `make stop`')
+    print('#' * 80)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description='Mininet topology script for 2x2 fabric with stratum_bmv2 and IPv4 hosts')
+    args = parser.parse_args()
+    setLogLevel('info')
+
+    main()
diff --git a/src/tests/hackfest3/new-probe/agent.py b/src/tests/hackfest3/new-probe/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a89f0f1eb69168e188bdcc0881cf3fe97442d2c
--- /dev/null
+++ b/src/tests/hackfest3/new-probe/agent.py
@@ -0,0 +1,135 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#import copy, logging, pytest
+#from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+#from common.tools.object_factory.Context import json_context_id
+#from common.tools.object_factory.Device import json_device_id
+#from common.tools.object_factory.Service import json_service_id
+#from common.tools.object_factory.Link import json_link_id
+#from common.tools.object_factory.Topology import json_topology_id
+#from context.client.EventsCollector import EventsCollector
+#from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology, Service, ServiceId
+#from monitoring.client.MonitoringClient import MonitoringClient
+#from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId, DeviceOperationalStatusEnum
+
+import os, threading, time, socket
+from common.Settings import get_setting
+from common.proto.context_pb2 import Empty, Timestamp
+from common.proto.monitoring_pb2 import KpiDescriptor, Kpi, KpiId, KpiValue
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from monitoring.client.MonitoringClient import MonitoringClient
+from context.client.ContextClient import ContextClient
+
+# ----- If you want to use a .env file
+#from dotenv import load_dotenv
+#load_dotenv()
+#def get_setting(key):
+#    return os.getenv(key)
+
+
+#### gRPC Clients
+monitoring_client = MonitoringClient(get_setting('MONITORINGSERVICE_SERVICE_HOST'), get_setting('MONITORINGSERVICE_SERVICE_PORT_GRPC'))
+context_client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+
+### Locks and common variables
+# Lock for kpi_id
+kpi_id_lock = threading.Lock()
+kpi_id = KpiId()
+# Lock to know if we have registered a KPI or not
+enabled_lock = threading.Lock()
+enabled = False
+
+### Define the path to the Unix socket
+socket_path = "/home/teraflow/ngsdn-tutorial/tmp/sock"
+if os.path.exists(socket_path):
+    os.remove(socket_path)
+
+def thread_context_func():
+    global kpi_id
+    global enabled
+    while True:
+##########################################################
+################## YOUR INPUT HERE #######################
+##########################################################
+        # Listen for Context Service Events
+        # Differentiate based on event type
+        # if event_type == service created:
+        #     Create KpiDescriptor
+        #     Register Kpi and keep kpi_id
+        # if event_type == service removed:
+        #     stop sending values
+        pass  # placeholder so the skeleton parses; replace with your code
+##########################################################
+##################### UNTIL HERE #########################
+##########################################################
+
+def thread_kpi_func():
+    global kpi_id
+    global enabled
+    try:
+        # Create socket object
+        server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        # Bind the socket to the socket path
+        server_socket.bind(socket_path)
+        # Listen for incoming connections
+        server_socket.listen(1)
+        while True:
+            print("Awaiting for new connection!")
+            # Accept incoming connection
+            connection, client_address = server_socket.accept()
+            # Read data from the connection
+            data = connection.recv(1024)
+            if data:
+                with enabled_lock:
+                    if enabled:
+##########################################################
+################## YOUR INPUT HERE #######################
+##########################################################
+                        # if we have registered a KPI
+                        # store value to data
+                        data = data.decode()
+                        print(f"Received: {data}")
+                        with kpi_id_lock:
+                            # create Kpi
+                            # send Kpi to Monitoring
+                            pass  # placeholder so the skeleton parses
+##########################################################
+##################### UNTIL HERE #########################
+##########################################################
+            # Close the connection
+            connection.close()
+    except Exception as e:
+        print(f"Error: {str(e)}")
+
+
+def main():
+
+    # Start Thread that listens to context events
+    thread_context = threading.Thread(target=thread_context_func)
+    thread_context.daemon = True
+    thread_context.start()
+
+    # Start Thread that listens to socket
+    thread_kpi = threading.Thread(target=thread_kpi_func)
+    thread_kpi.daemon = True
+    thread_kpi.start()
+
+    try:
+        while True:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        os.remove(socket_path)
+        print("Script terminated.")
+
+if __name__ == "__main__":
+    main()
diff --git a/src/tests/hackfest3/new-probe/connect_to_mininet.sh b/src/tests/hackfest3/new-probe/connect_to_mininet.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a82d3767fc2669e7627bee0b5ca60e5626c920f2
--- /dev/null
+++ b/src/tests/hackfest3/new-probe/connect_to_mininet.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +CONTAINER=`docker ps | grep mininet | cut -f1 -d" "` +docker exec -it $CONTAINER /bin/bash diff --git a/src/tests/hackfest3/new-probe/copy.sh b/src/tests/hackfest3/new-probe/copy.sh new file mode 100755 index 0000000000000000000000000000000000000000..a01bc3235ddb62b2daaf8e6f3c1b09e8a28fac9f --- /dev/null +++ b/src/tests/hackfest3/new-probe/copy.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# get container id +CONTAINER=`docker ps | grep mininet | cut -f1 -d" "` +docker cp ping2.py $CONTAINER:/root diff --git a/src/tests/hackfest3/new-probe/ping2.py b/src/tests/hackfest3/new-probe/ping2.py new file mode 100644 index 0000000000000000000000000000000000000000..a58c68c91bf50dfe0bea6fc81b4709d006a21351 --- /dev/null +++ b/src/tests/hackfest3/new-probe/ping2.py @@ -0,0 +1,62 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import socket, re, time, subprocess, sys + +# Path of the socket inside mininet container +socket_path = "/tmp/sock" + +def main(): + hostname = sys.argv[1] + + try: + while True: + start_time = time.time() + + try: + # Run the ping command once and capture the output + response_time = 0 + except subprocess.CalledProcessError as e: + # If ping fails (even if it does not reach destination) + # This part is executed + response_time = -1 + + print("Latency: {} ms".format(response_time)) + + # Uncomment the following when ready to write to socket + #data = str(response_time) + # + # Write results in socket + #try: + # client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + # client_socket.connect(socket_path) + # client_socket.send(data.encode()) + # client_socket.close() + #except Exception as e: + # print(e) + + # The following is to make sure that we ping at least + # every 6 seconds regardless of how much time ping took. 
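+            # In other words, the loop targets a fixed ~6 s probe period: if a
+            # ping takes longer than 6 s, the next one starts right away.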
+ # Calculate the time taken by ping + execution_time = time.time() - start_time + # Wait the rest of the time + wait_time = max(0, 6 - execution_time) + time.sleep(wait_time) + + except KeyboardInterrupt: + print("Script terminated.") + +if __name__ == "__main__": + main() + diff --git a/src/tests/hackfest3/new-probe/solution/agent.py b/src/tests/hackfest3/new-probe/solution/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..058caa7fb8b56e13ed8d4d532515c71f1d3934cd --- /dev/null +++ b/src/tests/hackfest3/new-probe/solution/agent.py @@ -0,0 +1,165 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#import copy, logging, pytest +#from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events +#from common.tools.object_factory.Context import json_context_id +#from common.tools.object_factory.Device import json_device_id +#from common.tools.object_factory.Service import json_service_id +#from common.tools.object_factory.Link import json_link_id +#from common.tools.object_factory.Topology import json_topology_id +#from context.client.EventsCollector import EventsCollector +#from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology, Service, ServiceId +#from monitoring.client.MonitoringClient import MonitoringClient +#from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId, DeviceOperationalStatusEnum + +import os, threading, time, socket +from common.Settings import get_setting +from common.proto.context_pb2 import Empty, Timestamp +from common.proto.monitoring_pb2 import KpiDescriptor, Kpi, KpiId, KpiValue +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from monitoring.client.MonitoringClient import MonitoringClient +from context.client.ContextClient import ContextClient + +# ----- If you want to use .env file +#from dotenv import load_dotenv +#load_dotenv() +#def get_setting(key): +# return os.getenv(key) + + +#### gRPC Clients +monitoring_client = MonitoringClient(get_setting('MONITORINGSERVICE_SERVICE_HOST'), get_setting('MONITORINGSERVICE_SERVICE_PORT_GRPC')) +context_client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + +### Locks and common variables +enabled_lock = threading.Lock() +kpi_id_lock = threading.Lock() +kpi_id = KpiId() +enabled = False + +### Define the path to the Unix socket +socket_path = "/home/teraflow/ngsdn-tutorial/tmp/sock" +#socket_path = "./tmp/sock" +if os.path.exists(socket_path): + os.remove(socket_path) + +def thread_context_func(): + global kpi_id + global enabled + while True: + # Listen to ContextService/GetServiceEvents stream + events = context_client.GetServiceEvents(Empty()) + for event in events: + event_service = event.service_id + event_service_uuid = event_service.service_uuid.uuid + event_type = event.event.event_type + if event_type == 1: + print(f"stream: New CREATE event:\n{event_service}") + kpi_descriptor = 
KpiDescriptor( + kpi_id = None, + kpi_id_list = [], + device_id = None, + endpoint_id = None, + kpi_description = f"Loss Ratio for service {event_service_uuid}", + service_id = event_service, + kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN + ) + response = monitoring_client.SetKpi(kpi_descriptor) + print(response) + with kpi_id_lock: + kpi_id = response + print(kpi_id) + with enabled_lock: + enabled = True + elif event_type == 3: + print(f"stream: New REMOVE event:\n{event_service}") + with enabled_lock: + enabled = False + +def thread_kpi_func(): + global kpi_id + global enabled + try: + # Create socket object + server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + + # Bind the socket to the socket path + server_socket.bind(socket_path) + + # Listen for incoming connections + server_socket.listen(1) + + while True: + print("Awaiting for new connection!") + + # Accept incoming connection + connection, client_address = server_socket.accept() + + # Read data from the connection + data = connection.recv(1024) + + if data: + with enabled_lock: + if enabled: + data = data.decode() + print(f"Received: {data}") + with kpi_id_lock: + + now = time.time() + + new_timestamp = Timestamp() + new_timestamp.timestamp = now + + new_value = KpiValue() + new_value.floatVal = float(data) + + kpi = Kpi ( + kpi_id = kpi_id, + timestamp = new_timestamp, + kpi_value = new_value + ) + print(kpi) + response = monitoring_client.IncludeKpi(kpi) + print(f"response: {response}") + + # Close the connection + connection.close() + + + except Exception as e: + print(f"Error: {str(e)}") + + +def main(): + + # Start Thread that listens to context events + thread_context = threading.Thread(target=thread_context_func) + thread_context.daemon = True + thread_context.start() + + # Start Thread that listens to socket + thread_kpi = threading.Thread(target=thread_kpi_func) + thread_kpi.daemon = True + thread_kpi.start() + + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + os.remove(socket_path) + print("Script terminated.") + +if __name__ == "__main__": + main() diff --git a/src/tests/hackfest3/new-probe/solution/connect_to_mininet.sh b/src/tests/hackfest3/new-probe/solution/connect_to_mininet.sh new file mode 100755 index 0000000000000000000000000000000000000000..a82d3767fc2669e7627bee0b5ca60e5626c920f2 --- /dev/null +++ b/src/tests/hackfest3/new-probe/solution/connect_to_mininet.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +CONTAINER=`docker ps | grep mininet | cut -f1 -d" "` +docker exec -it $CONTAINER /bin/bash diff --git a/src/tests/hackfest3/new-probe/solution/copy.sh b/src/tests/hackfest3/new-probe/solution/copy.sh new file mode 100755 index 0000000000000000000000000000000000000000..a01bc3235ddb62b2daaf8e6f3c1b09e8a28fac9f --- /dev/null +++ b/src/tests/hackfest3/new-probe/solution/copy.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# get container id +CONTAINER=`docker ps | grep mininet | cut -f1 -d" "` +docker cp ping2.py $CONTAINER:/root diff --git a/src/tests/hackfest3/new-probe/solution/ping2.py b/src/tests/hackfest3/new-probe/solution/ping2.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3a97c83bd0734e60874f90873e58bfad4e5ae7 --- /dev/null +++ b/src/tests/hackfest3/new-probe/solution/ping2.py @@ -0,0 +1,75 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import socket, re, time, subprocess, sys + +socket_path = "/tmp/sock" +#socket_path = "./tmp/sock" + +def main(): + hostname = sys.argv[1] + count = 1 + wait = 5 + + total_pings = 0 + successful_pings = 0 + try: + while True: + start_time = time.time() + + try: + # Run the ping command and capture the output + result = subprocess.check_output(["ping", "-W", str(wait), "-c", str(count), hostname], universal_newlines=True) + + response_time = float(re.findall(r"time=([0-9.]+) ms", result)[0]) + + except subprocess.CalledProcessError as e: + # If ping fails return negative response_time + response_time = -1 + + # Calculate new loss_ratio + if response_time != -1: + successful_pings += 1 + total_pings += 1 + moving_loss_ratio = round(((total_pings - successful_pings) / float(total_pings) * 100), 2) + + print("Total pings: {}".format(total_pings)) + print("Successful pings: {}".format(successful_pings)) + + print("Packet loss: {}%".format(moving_loss_ratio)) + print("Latency: {} ms".format(response_time)) + + data = str(response_time) + + # Write results in socket + try: + client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + client_socket.connect(socket_path) + client_socket.send(data.encode()) + client_socket.close() + except Exception as e: + print(e) + + # Calculate the time taken by ping + execution_time = time.time() - start_time + # Wait the rest of the time + wait_time = max(0, 6 - execution_time) + time.sleep(wait_time) + + except KeyboardInterrupt: + print("Script terminated.") + +if __name__ == "__main__": + main() + diff --git a/src/tests/hackfest3/p4/bmv2.json b/src/tests/hackfest3/p4/bmv2.json new file mode 100644 index 0000000000000000000000000000000000000000..f001eb52e90e875c4152f4d7820664402ac856c3 --- /dev/null +++ b/src/tests/hackfest3/p4/bmv2.json @@ -0,0 +1,381 @@ +{ + "header_types" : [ + { + "name" : "scalars_0", + "id" : 0, + "fields" : [ + ["local_metadata_t.is_multicast", 1, false], + ["_padding_0", 7, false] + ] + }, + { + "name" : "standard_metadata", + "id" : 1, + "fields" : [ + ["ingress_port", 9, false], + ["egress_spec", 9, false], + ["egress_port", 9, false], + ["clone_spec", 32, false], + ["instance_type", 32, false], + ["drop", 1, false], + ["recirculate_port", 16, false], + ["packet_length", 32, false], + ["enq_timestamp", 32, false], + ["enq_qdepth", 19, false], + ["deq_timedelta", 32, false], + ["deq_qdepth", 19, false], + ["ingress_global_timestamp", 48, false], + ["egress_global_timestamp", 48, false], + ["lf_field_list", 32, false], + ["mcast_grp", 16, false], + ["resubmit_flag", 32, false], + ["egress_rid", 16, false], + ["recirculate_flag", 32, false], + ["checksum_error", 1, false], + ["parser_error", 32, false], + ["priority", 3, false], + ["_padding", 2, false] + ] + }, + { + "name" : "ethernet_t", + "id" : 2, + "fields" : [ + ["dst_addr", 48, false], + ["src_addr", 48, false], + ["ether_type", 16, false] + ] + } + ], + "headers" : [ + { + "name" : "scalars", + "id" : 0, + "header_type" : "scalars_0", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "standard_metadata", + "id" : 1, + "header_type" : "standard_metadata", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "ethernet", + "id" : 2, + "header_type" : "ethernet_t", + "metadata" : false, + "pi_omit" : true + } + ], + "header_stacks" : [], + "header_union_types" : [], + "header_unions" : [], + "header_union_stacks" : [], + "field_lists" : [], + "errors" : [ + ["NoError", 1], + ["PacketTooShort", 2], + ["NoMatch", 3], + ["StackOutOfBounds", 4], + 
["HeaderTooShort", 5], + ["ParserTimeout", 6], + ["ParserInvalidArgument", 7] + ], + "enums" : [], + "parsers" : [ + { + "name" : "parser", + "id" : 0, + "init_state" : "start", + "parse_states" : [ + { + "name" : "start", + "id" : 0, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "ethernet" + } + ], + "op" : "extract" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [] + } + ] + } + ], + "parse_vsets" : [], + "deparsers" : [ + { + "name" : "deparser", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 130, + "column" : 8, + "source_fragment" : "DeparserImpl" + }, + "order" : ["ethernet"] + } + ], + "meter_arrays" : [], + "counter_arrays" : [], + "register_arrays" : [], + "calculations" : [], + "learn_lists" : [], + "actions" : [ + { + "name" : "IngressPipeImpl.drop", + "id" : 0, + "runtime_data" : [], + "primitives" : [ + { + "op" : "mark_to_drop", + "parameters" : [ + { + "type" : "header", + "value" : "standard_metadata" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 77, + "column" : 8, + "source_fragment" : "mark_to_drop(standard_metadata)" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_egress_port", + "id" : 1, + "runtime_data" : [ + { + "name" : "port", + "bitwidth" : 9 + } + ], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "egress_spec"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 81, + "column" : 8, + "source_fragment" : "standard_metadata.egress_spec = port" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_multicast_group", + "id" : 2, + "runtime_data" : [ + { + "name" : "gid", + "bitwidth" : 16 + } + ], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "mcast_grp"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 89, + "column" : 8, + "source_fragment" : "standard_metadata.mcast_grp = gid" + } + }, + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.is_multicast"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "b2d", + "left" : null, + "right" : { + "type" : "bool", + "value" : true + } + } + } + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 90, + "column" : 8, + "source_fragment" : "local_metadata.is_multicast = true" + } + } + ] + } + ], + "pipelines" : [ + { + "name" : "ingress", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 71, + "column" : 8, + "source_fragment" : "IngressPipeImpl" + }, + "init_table" : "IngressPipeImpl.l2_exact_table", + "tables" : [ + { + "name" : "IngressPipeImpl.l2_exact_table", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 95, + "column" : 10, + "source_fragment" : "l2_exact_table" + }, + "key" : [ + { + "match_type" : "exact", + "name" : "standard_metadata.ingress_port", + "target" : ["standard_metadata", "ingress_port"], + "mask" : null + } + ], + "match_type" : "exact", + "type" : "simple", + "max_size" : 1024, + "with_counters" : false, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [1, 2, 0], + "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.set_multicast_group", 
"IngressPipeImpl.drop"], + "base_default_next" : null, + "next_tables" : { + "IngressPipeImpl.set_egress_port" : null, + "IngressPipeImpl.set_multicast_group" : null, + "IngressPipeImpl.drop" : null + }, + "default_entry" : { + "action_id" : 0, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + } + ], + "action_profiles" : [], + "conditionals" : [] + }, + { + "name" : "egress", + "id" : 1, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 116, + "column" : 8, + "source_fragment" : "EgressPipeImpl" + }, + "init_table" : null, + "tables" : [], + "action_profiles" : [], + "conditionals" : [] + } + ], + "checksums" : [], + "force_arith" : [], + "extern_instances" : [], + "field_aliases" : [ + [ + "queueing_metadata.enq_timestamp", + ["standard_metadata", "enq_timestamp"] + ], + [ + "queueing_metadata.enq_qdepth", + ["standard_metadata", "enq_qdepth"] + ], + [ + "queueing_metadata.deq_timedelta", + ["standard_metadata", "deq_timedelta"] + ], + [ + "queueing_metadata.deq_qdepth", + ["standard_metadata", "deq_qdepth"] + ], + [ + "intrinsic_metadata.ingress_global_timestamp", + ["standard_metadata", "ingress_global_timestamp"] + ], + [ + "intrinsic_metadata.egress_global_timestamp", + ["standard_metadata", "egress_global_timestamp"] + ], + [ + "intrinsic_metadata.lf_field_list", + ["standard_metadata", "lf_field_list"] + ], + [ + "intrinsic_metadata.mcast_grp", + ["standard_metadata", "mcast_grp"] + ], + [ + "intrinsic_metadata.resubmit_flag", + ["standard_metadata", "resubmit_flag"] + ], + [ + "intrinsic_metadata.egress_rid", + ["standard_metadata", "egress_rid"] + ], + [ + "intrinsic_metadata.recirculate_flag", + ["standard_metadata", "recirculate_flag"] + ], + [ + "intrinsic_metadata.priority", + ["standard_metadata", "priority"] + ] + ], + "program" : "p4src/main.p4", + "__meta__" : { + "version" : [2, 18], + "compiler" : "https://github.com/p4lang/p4c" + } +} \ No newline at end of file diff --git a/src/tests/hackfest3/p4/main.p4 b/src/tests/hackfest3/p4/main.p4 new file mode 100644 index 0000000000000000000000000000000000000000..843eb0d580e362e74b25c768b1b01e750138637a --- /dev/null +++ b/src/tests/hackfest3/p4/main.p4 @@ -0,0 +1,144 @@ +/* + * Copyright 2019-present Open Networking Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+#include <core.p4>
+#include <v1model.p4>
+
+typedef bit<9>  port_num_t;
+typedef bit<48> mac_addr_t;
+typedef bit<16> mcast_group_id_t;
+
+//------------------------------------------------------------------------------
+// HEADER DEFINITIONS
+//------------------------------------------------------------------------------
+
+header ethernet_t {
+    mac_addr_t dst_addr;
+    mac_addr_t src_addr;
+    bit<16>    ether_type;
+}
+
+struct parsed_headers_t {
+    ethernet_t ethernet;
+}
+
+struct local_metadata_t {
+    bool is_multicast;
+}
+
+
+//------------------------------------------------------------------------------
+// INGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+parser ParserImpl (packet_in packet,
+                   out parsed_headers_t hdr,
+                   inout local_metadata_t local_metadata,
+                   inout standard_metadata_t standard_metadata)
+{
+    state start {
+        transition parse_ethernet;
+    }
+
+    state parse_ethernet {
+        packet.extract(hdr.ethernet);
+        transition accept;
+    }
+}
+
+
+control VerifyChecksumImpl(inout parsed_headers_t hdr,
+                           inout local_metadata_t meta)
+{
+    apply { /* EMPTY */ }
+}
+
+
+control IngressPipeImpl (inout parsed_headers_t hdr,
+                         inout local_metadata_t local_metadata,
+                         inout standard_metadata_t standard_metadata) {
+
+    // Drop action shared by many tables.
+    action drop() {
+        mark_to_drop(standard_metadata);
+    }
+
+    action set_egress_port(port_num_t port) {
+        standard_metadata.egress_spec = port;
+    }
+
+    action set_multicast_group(mcast_group_id_t gid) {
+        // gid will be used by the Packet Replication Engine (PRE) in the
+        // Traffic Manager--located right after the ingress pipeline, to
+        // replicate a packet to multiple egress ports, specified by the control
+        // plane by means of P4Runtime MulticastGroupEntry messages.
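+        // In this trimmed-down pipeline the is_multicast flag set below is
+        // informational only: the egress stage is empty, so no ingress-port
+        // pruning of the replicated copies takes place.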
+ standard_metadata.mcast_grp = gid; + local_metadata.is_multicast = true; + } + + // --- l2_exact_table ------------------ + + table l2_exact_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + set_egress_port; + set_multicast_group; + @defaultonly drop; + } + const default_action = drop; + } + + apply { + l2_exact_table.apply(); + } +} + +//------------------------------------------------------------------------------ +// EGRESS PIPELINE +//------------------------------------------------------------------------------ + +control EgressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + apply { /* EMPTY */ } +} + + +control ComputeChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t local_metadata) +{ + apply { /* EMPTY */ } +} + + +control DeparserImpl(packet_out packet, in parsed_headers_t hdr) { + apply { + packet.emit(hdr.ethernet); + } +} + + +V1Switch( + ParserImpl(), + VerifyChecksumImpl(), + IngressPipeImpl(), + EgressPipeImpl(), + ComputeChecksumImpl(), + DeparserImpl() +) main; diff --git a/src/tests/hackfest3/p4/orig/bmv2.json b/src/tests/hackfest3/p4/orig/bmv2.json new file mode 100644 index 0000000000000000000000000000000000000000..f001eb52e90e875c4152f4d7820664402ac856c3 --- /dev/null +++ b/src/tests/hackfest3/p4/orig/bmv2.json @@ -0,0 +1,381 @@ +{ + "header_types" : [ + { + "name" : "scalars_0", + "id" : 0, + "fields" : [ + ["local_metadata_t.is_multicast", 1, false], + ["_padding_0", 7, false] + ] + }, + { + "name" : "standard_metadata", + "id" : 1, + "fields" : [ + ["ingress_port", 9, false], + ["egress_spec", 9, false], + ["egress_port", 9, false], + ["clone_spec", 32, false], + ["instance_type", 32, false], + ["drop", 1, false], + ["recirculate_port", 16, false], + ["packet_length", 32, false], + ["enq_timestamp", 32, false], + ["enq_qdepth", 19, false], + ["deq_timedelta", 32, false], + ["deq_qdepth", 19, false], + ["ingress_global_timestamp", 48, false], + ["egress_global_timestamp", 48, false], + ["lf_field_list", 32, false], + ["mcast_grp", 16, false], + ["resubmit_flag", 32, false], + ["egress_rid", 16, false], + ["recirculate_flag", 32, false], + ["checksum_error", 1, false], + ["parser_error", 32, false], + ["priority", 3, false], + ["_padding", 2, false] + ] + }, + { + "name" : "ethernet_t", + "id" : 2, + "fields" : [ + ["dst_addr", 48, false], + ["src_addr", 48, false], + ["ether_type", 16, false] + ] + } + ], + "headers" : [ + { + "name" : "scalars", + "id" : 0, + "header_type" : "scalars_0", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "standard_metadata", + "id" : 1, + "header_type" : "standard_metadata", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "ethernet", + "id" : 2, + "header_type" : "ethernet_t", + "metadata" : false, + "pi_omit" : true + } + ], + "header_stacks" : [], + "header_union_types" : [], + "header_unions" : [], + "header_union_stacks" : [], + "field_lists" : [], + "errors" : [ + ["NoError", 1], + ["PacketTooShort", 2], + ["NoMatch", 3], + ["StackOutOfBounds", 4], + ["HeaderTooShort", 5], + ["ParserTimeout", 6], + ["ParserInvalidArgument", 7] + ], + "enums" : [], + "parsers" : [ + { + "name" : "parser", + "id" : 0, + "init_state" : "start", + "parse_states" : [ + { + "name" : "start", + "id" : 0, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "ethernet" + } + ], + "op" : "extract" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + 
"next_state" : null + } + ], + "transition_key" : [] + } + ] + } + ], + "parse_vsets" : [], + "deparsers" : [ + { + "name" : "deparser", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 130, + "column" : 8, + "source_fragment" : "DeparserImpl" + }, + "order" : ["ethernet"] + } + ], + "meter_arrays" : [], + "counter_arrays" : [], + "register_arrays" : [], + "calculations" : [], + "learn_lists" : [], + "actions" : [ + { + "name" : "IngressPipeImpl.drop", + "id" : 0, + "runtime_data" : [], + "primitives" : [ + { + "op" : "mark_to_drop", + "parameters" : [ + { + "type" : "header", + "value" : "standard_metadata" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 77, + "column" : 8, + "source_fragment" : "mark_to_drop(standard_metadata)" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_egress_port", + "id" : 1, + "runtime_data" : [ + { + "name" : "port", + "bitwidth" : 9 + } + ], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "egress_spec"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 81, + "column" : 8, + "source_fragment" : "standard_metadata.egress_spec = port" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_multicast_group", + "id" : 2, + "runtime_data" : [ + { + "name" : "gid", + "bitwidth" : 16 + } + ], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "mcast_grp"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 89, + "column" : 8, + "source_fragment" : "standard_metadata.mcast_grp = gid" + } + }, + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.is_multicast"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "b2d", + "left" : null, + "right" : { + "type" : "bool", + "value" : true + } + } + } + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 90, + "column" : 8, + "source_fragment" : "local_metadata.is_multicast = true" + } + } + ] + } + ], + "pipelines" : [ + { + "name" : "ingress", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 71, + "column" : 8, + "source_fragment" : "IngressPipeImpl" + }, + "init_table" : "IngressPipeImpl.l2_exact_table", + "tables" : [ + { + "name" : "IngressPipeImpl.l2_exact_table", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 95, + "column" : 10, + "source_fragment" : "l2_exact_table" + }, + "key" : [ + { + "match_type" : "exact", + "name" : "standard_metadata.ingress_port", + "target" : ["standard_metadata", "ingress_port"], + "mask" : null + } + ], + "match_type" : "exact", + "type" : "simple", + "max_size" : 1024, + "with_counters" : false, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [1, 2, 0], + "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"], + "base_default_next" : null, + "next_tables" : { + "IngressPipeImpl.set_egress_port" : null, + "IngressPipeImpl.set_multicast_group" : null, + "IngressPipeImpl.drop" : null + }, + "default_entry" : { + "action_id" : 0, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + } + ], + "action_profiles" : [], + "conditionals" : [] + }, + { + "name" : "egress", + "id" : 1, + 
"source_info" : { + "filename" : "p4src/main.p4", + "line" : 116, + "column" : 8, + "source_fragment" : "EgressPipeImpl" + }, + "init_table" : null, + "tables" : [], + "action_profiles" : [], + "conditionals" : [] + } + ], + "checksums" : [], + "force_arith" : [], + "extern_instances" : [], + "field_aliases" : [ + [ + "queueing_metadata.enq_timestamp", + ["standard_metadata", "enq_timestamp"] + ], + [ + "queueing_metadata.enq_qdepth", + ["standard_metadata", "enq_qdepth"] + ], + [ + "queueing_metadata.deq_timedelta", + ["standard_metadata", "deq_timedelta"] + ], + [ + "queueing_metadata.deq_qdepth", + ["standard_metadata", "deq_qdepth"] + ], + [ + "intrinsic_metadata.ingress_global_timestamp", + ["standard_metadata", "ingress_global_timestamp"] + ], + [ + "intrinsic_metadata.egress_global_timestamp", + ["standard_metadata", "egress_global_timestamp"] + ], + [ + "intrinsic_metadata.lf_field_list", + ["standard_metadata", "lf_field_list"] + ], + [ + "intrinsic_metadata.mcast_grp", + ["standard_metadata", "mcast_grp"] + ], + [ + "intrinsic_metadata.resubmit_flag", + ["standard_metadata", "resubmit_flag"] + ], + [ + "intrinsic_metadata.egress_rid", + ["standard_metadata", "egress_rid"] + ], + [ + "intrinsic_metadata.recirculate_flag", + ["standard_metadata", "recirculate_flag"] + ], + [ + "intrinsic_metadata.priority", + ["standard_metadata", "priority"] + ] + ], + "program" : "p4src/main.p4", + "__meta__" : { + "version" : [2, 18], + "compiler" : "https://github.com/p4lang/p4c" + } +} \ No newline at end of file diff --git a/src/tests/hackfest3/p4/orig/main.p4 b/src/tests/hackfest3/p4/orig/main.p4 new file mode 100644 index 0000000000000000000000000000000000000000..843eb0d580e362e74b25c768b1b01e750138637a --- /dev/null +++ b/src/tests/hackfest3/p4/orig/main.p4 @@ -0,0 +1,144 @@ +/* + * Copyright 2019-present Open Networking Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+#include <core.p4>
+#include <v1model.p4>
+
+typedef bit<9>  port_num_t;
+typedef bit<48> mac_addr_t;
+typedef bit<16> mcast_group_id_t;
+
+//------------------------------------------------------------------------------
+// HEADER DEFINITIONS
+//------------------------------------------------------------------------------
+
+header ethernet_t {
+    mac_addr_t dst_addr;
+    mac_addr_t src_addr;
+    bit<16>    ether_type;
+}
+
+struct parsed_headers_t {
+    ethernet_t ethernet;
+}
+
+struct local_metadata_t {
+    bool is_multicast;
+}
+
+
+//------------------------------------------------------------------------------
+// INGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+parser ParserImpl (packet_in packet,
+                   out parsed_headers_t hdr,
+                   inout local_metadata_t local_metadata,
+                   inout standard_metadata_t standard_metadata)
+{
+    state start {
+        transition parse_ethernet;
+    }
+
+    state parse_ethernet {
+        packet.extract(hdr.ethernet);
+        transition accept;
+    }
+}
+
+
+control VerifyChecksumImpl(inout parsed_headers_t hdr,
+                           inout local_metadata_t meta)
+{
+    apply { /* EMPTY */ }
+}
+
+
+control IngressPipeImpl (inout parsed_headers_t hdr,
+                         inout local_metadata_t local_metadata,
+                         inout standard_metadata_t standard_metadata) {
+
+    // Drop action shared by many tables.
+    action drop() {
+        mark_to_drop(standard_metadata);
+    }
+
+    action set_egress_port(port_num_t port) {
+        standard_metadata.egress_spec = port;
+    }
+
+    action set_multicast_group(mcast_group_id_t gid) {
+        // gid will be used by the Packet Replication Engine (PRE) in the
+        // Traffic Manager--located right after the ingress pipeline, to
+        // replicate a packet to multiple egress ports, specified by the control
+        // plane by means of P4Runtime MulticastGroupEntry messages.
+ standard_metadata.mcast_grp = gid; + local_metadata.is_multicast = true; + } + + // --- l2_exact_table ------------------ + + table l2_exact_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + set_egress_port; + set_multicast_group; + @defaultonly drop; + } + const default_action = drop; + } + + apply { + l2_exact_table.apply(); + } +} + +//------------------------------------------------------------------------------ +// EGRESS PIPELINE +//------------------------------------------------------------------------------ + +control EgressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + apply { /* EMPTY */ } +} + + +control ComputeChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t local_metadata) +{ + apply { /* EMPTY */ } +} + + +control DeparserImpl(packet_out packet, in parsed_headers_t hdr) { + apply { + packet.emit(hdr.ethernet); + } +} + + +V1Switch( + ParserImpl(), + VerifyChecksumImpl(), + IngressPipeImpl(), + EgressPipeImpl(), + ComputeChecksumImpl(), + DeparserImpl() +) main; diff --git a/src/tests/hackfest3/p4/orig/p4info.txt b/src/tests/hackfest3/p4/orig/p4info.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b58e740864b72e6ca87582431cd7bd57894d0dd --- /dev/null +++ b/src/tests/hackfest3/p4/orig/p4info.txt @@ -0,0 +1,62 @@ +pkg_info { + arch: "v1model" +} +tables { + preamble { + id: 33605373 + name: "IngressPipeImpl.l2_exact_table" + alias: "l2_exact_table" + } + match_fields { + id: 1 + name: "standard_metadata.ingress_port" + bitwidth: 9 + match_type: EXACT + } + action_refs { + id: 16812802 + } + action_refs { + id: 16841371 + } + action_refs { + id: 16796182 + annotations: "@defaultonly" + scope: DEFAULT_ONLY + } + const_default_action_id: 16796182 + size: 1024 +} +actions { + preamble { + id: 16796182 + name: "IngressPipeImpl.drop" + alias: "drop" + } +} +actions { + preamble { + id: 16812802 + name: "IngressPipeImpl.set_egress_port" + alias: "set_egress_port" + } + params { + id: 1 + name: "port" + bitwidth: 9 + } +} +actions { + preamble { + id: 16841371 + name: "IngressPipeImpl.set_multicast_group" + alias: "set_multicast_group" + } + params { + id: 1 + name: "gid" + bitwidth: 16 + } +} +type_info { +} diff --git a/src/tests/hackfest3/p4/p4info.txt b/src/tests/hackfest3/p4/p4info.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b58e740864b72e6ca87582431cd7bd57894d0dd --- /dev/null +++ b/src/tests/hackfest3/p4/p4info.txt @@ -0,0 +1,62 @@ +pkg_info { + arch: "v1model" +} +tables { + preamble { + id: 33605373 + name: "IngressPipeImpl.l2_exact_table" + alias: "l2_exact_table" + } + match_fields { + id: 1 + name: "standard_metadata.ingress_port" + bitwidth: 9 + match_type: EXACT + } + action_refs { + id: 16812802 + } + action_refs { + id: 16841371 + } + action_refs { + id: 16796182 + annotations: "@defaultonly" + scope: DEFAULT_ONLY + } + const_default_action_id: 16796182 + size: 1024 +} +actions { + preamble { + id: 16796182 + name: "IngressPipeImpl.drop" + alias: "drop" + } +} +actions { + preamble { + id: 16812802 + name: "IngressPipeImpl.set_egress_port" + alias: "set_egress_port" + } + params { + id: 1 + name: "port" + bitwidth: 9 + } +} +actions { + preamble { + id: 16841371 + name: "IngressPipeImpl.set_multicast_group" + alias: "set_multicast_group" + } + params { + id: 1 + name: "gid" + bitwidth: 16 + } +} +type_info { +} diff --git a/src/tests/hackfest3/probe/README.md 
b/src/tests/hackfest3/probe/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bfc1e4731dc44a61b32ad5d75093f4aca1f57dbf
--- /dev/null
+++ b/src/tests/hackfest3/probe/README.md
@@ -0,0 +1,40 @@
+# Probe for P4 mininet devices
+
+Step 1:
+To copy the necessary files, run:
+
+```
+probe-tfs/deploy.sh
+```
+
+Step 2:
+To connect to the mininet docker, run:
+
+```
+probe-tfs/connect_to_mininet.sh
+```
+
+Step 3:
+From inside the mininet docker, run:
+
+```
+./tfsagent
+```
+
+Step 4 (on another terminal):
+To establish the service, run:
+```
+src/tests/hackfest3/run_test_02_create_service.sh
+```
+
+Step 5:
+From inside mininet (make mn-cli):
+```
+client ./tfsping
+```
+
+Step 6 (on another terminal):
+To check the latest monitoring samples, run:
+```
+python src/tests/hackfest3/probe/monitoring_kpis.py
+```
diff --git a/src/tests/hackfest3/probe/monitoring_kpis.ipynb b/src/tests/hackfest3/probe/monitoring_kpis.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..728b7394eb9cbbf50fd5b4fcad568c0968abc608
--- /dev/null
+++ b/src/tests/hackfest3/probe/monitoring_kpis.ipynb
@@ -0,0 +1,184 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Checking the monitoring component"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import time\n",
+    "import datetime\n",
+    "import uuid\n",
+    "import random\n",
+    "\n",
+    "from dotenv import load_dotenv\n",
+    "from IPython.display import clear_output, display, HTML\n",
+    "\n",
+    "from common.tools.timestamp.Converters import timestamp_utcnow_to_float, timestamp_float_to_string\n",
+    "from common.tools.grpc.Tools import grpc_message_to_json_string\n",
+    "from common.proto.kpi_sample_types_pb2 import KpiSampleType\n",
+    "from common.proto.monitoring_pb2 import KpiDescriptor, KpiId, KpiQuery, Kpi\n",
+    "from monitoring.client.MonitoringClient import MonitoringClient"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'0abfb00117d4461b9fa5085bee4be58f'"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "load_dotenv()\n",
+    "\n",
+    "monitoring_client = MonitoringClient()\n",
+    "uuid.uuid4().hex"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Created KPI {\"kpi_id\": {\"uuid\": \"1\"}}: \n"
+     ]
+    }
+   ],
+   "source": [
+    "kpi_description: KpiDescriptor = KpiDescriptor()\n",
+    "kpi_description.kpi_description = \"Security status of service {}\".format(uuid.uuid4().hex)\n",
+    "kpi_description.service_id.service_uuid.uuid = \"608df176-90b8-5950-b50d-1810c6eaaa5d\"\n",
+    "kpi_description.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN\n",
+    "new_kpi = monitoring_client.SetKpi(kpi_description)\n",
+    "print(\"Created KPI {}: \".format(grpc_message_to_json_string(new_kpi)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<table>\n",
+       "  <tr><td colspan=\"3\">2023-02-24 16:23:34.373384</td></tr>\n",
+       "  <tr><th>KPI ID</th><th>Timestamp</th><th>Value</th></tr>\n",
+       "  <tr><td>0 - 1</td><td>2023-02-23T13:55:09Z</td><td>floatVal: 1868.0</td></tr>\n",
+       "  <tr><td>1 - 1</td><td>2023-02-23T13:55:07Z</td><td>floatVal: 1878.0</td></tr>\n",
+       "  <tr><td>2 - 1</td><td>2023-02-23T13:55:05Z</td><td>floatVal: 2065.0</td></tr>\n",
+       "  <tr><td>3 - 1</td><td>2023-02-23T13:55:03Z</td><td>floatVal: 1993.0</td></tr>\n",
+       "  <tr><td>4 - 1</td><td>2023-02-23T13:55:01Z</td><td>floatVal: 2006.0</td></tr>\n",
+       "  <tr><td>5 - 1</td><td>2023-02-23T13:54:59Z</td><td>floatVal: 1938.0</td></tr>\n",
+       "  <tr><td>6 - 1</td><td>2023-02-23T13:54:57Z</td><td>floatVal: 1920.0</td></tr>\n",
+       "  <tr><td>7 - 1</td><td>2023-02-23T13:54:55Z</td><td>floatVal: 1984.0</td></tr>\n",
+       "  <tr><td>8 - 1</td><td>2023-02-23T13:54:53Z</td><td>floatVal: 1883.0</td></tr>\n",
+       "  <tr><td>9 - 1</td><td>2023-02-23T13:54:51Z</td><td>floatVal: 1948.0</td></tr>\n",
+       "</table>"
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "ename": "KeyboardInterrupt",
+     "evalue": "",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn [4], line 31\u001b[0m\n\u001b[1;32m 29\u001b[0m table \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m \u001b[39m\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m</table>\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 30\u001b[0m display(HTML(table))\n\u001b[0;32m---> 31\u001b[0m time\u001b[39m.\u001b[39;49msleep(\u001b[39m5\u001b[39;49m)\n\u001b[1;32m 32\u001b[0m clear_output(wait\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n",
+      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
+     ]
+    }
+   ],
+   "source": [
+    "kpi_id = input(\"What is the KPI ID?\")\n",
+    "query = KpiQuery()\n",
+    "query.kpi_ids.append(KpiId(**{\"kpi_id\": {\"uuid\": kpi_id}}))\n",
+    "query.last_n_samples = 10\n",
+    "\n",
+    "while True:\n",
+    "\n",
+    "    kpi = Kpi()\n",
+    "    kpi.kpi_id.kpi_id.uuid = new_kpi.kpi_id.uuid\n",
+    "    kpi.timestamp.timestamp = timestamp_utcnow_to_float()\n",
+    "    kpi.kpi_value.int32Val = random.randint(10, 4000)\n",
+    "    # monitoring_client.IncludeKpi(kpi)\n",
+    "\n",
+    "    response = monitoring_client.QueryKpiData(query)\n",
+    "    # print(response)\n",
+    "    table = f\"\"\"\n",
+    "    <table>\n",
+    "      <tr><td colspan=\"3\">{datetime.datetime.now()}</td></tr>\n",
+    "      <tr><th>KPI ID</th><th>Timestamp</th><th>Value</th></tr>\n",
+    "    \"\"\"\n",
+    "    for kpi in response.raw_kpi_lists:\n",
+    "        cur_kpi_id = kpi.kpi_id.kpi_id.uuid\n",
+    "        for i, raw_kpi in enumerate(kpi.raw_kpis):\n",
+    "            # print(cur_kpi_id, raw_kpi.timestamp.timestamp, raw_kpi.kpi_value)\n",
+    "            table += \"<tr><td>{} - {}</td><td>{}</td><td>{}</td></tr>\".format(\n",
+    "                i, cur_kpi_id, timestamp_float_to_string(raw_kpi.timestamp.timestamp), raw_kpi.kpi_value\n",
+    "            )\n",
+    "    table += \"</table>
\"\n", + " display(HTML(table))\n", + " time.sleep(5)\n", + " clear_output(wait=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "tfs", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.14" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "7ea5723b29014fc8d8bf1a065f5287f0787f54201758f2b5d4b4b0b2ddc48863" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/src/tests/hackfest3/probe/monitoring_kpis.py b/src/tests/hackfest3/probe/monitoring_kpis.py new file mode 100644 index 0000000000000000000000000000000000000000..880977a2f62069586efdb398b8b3b5d3ac20dac0 --- /dev/null +++ b/src/tests/hackfest3/probe/monitoring_kpis.py @@ -0,0 +1,85 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# File to monitor the latest *n* samples from the KPI ID *id* +# and updates it every *i* seconds +# +# Author: Carlos Natalino + +import argparse +import datetime +import time + +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.proto.monitoring_pb2 import KpiDescriptor, KpiId, KpiQuery +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.timestamp.Converters import timestamp_float_to_string +from monitoring.client.MonitoringClient import MonitoringClient + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-n", + "--last-n-samples", + default=10, + type=int, + help="Number of latest samples of the KPI to show.", + ) + parser.add_argument( + "-s", + "--sleep", + default=5, + type=int, + help="Seconds between consecutive refreshes.", + ) + parser.add_argument("-id", "--kpi-id", help="KPI ID, if known.") + args = parser.parse_args() + + monitoring_client = MonitoringClient() + + if args.kpi_id is None: + service_uuid = "608df176-90b8-5950-b50d-1810c6eaaa5d" + kpi_description: KpiDescriptor = KpiDescriptor() + kpi_description.kpi_description = "Security status of service {}".format( + service_uuid + ) + kpi_description.service_id.service_uuid.uuid = service_uuid + kpi_description.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN + new_kpi = monitoring_client.SetKpi(kpi_description) + print("Created KPI {}: ".format(grpc_message_to_json_string(new_kpi))) + kpi_id = new_kpi.kpi_id.uuid + else: + kpi_id = args.kpi_id + + query = KpiQuery() + query.kpi_ids.append(KpiId(**{"kpi_id": {"uuid": kpi_id}})) + query.last_n_samples = args.last_n_samples + + while True: + print(chr(27) + "[2J") + response = monitoring_client.QueryKpiData(query) + print("{}\t{}\t{:<20}\t{}".format("Index", "KPI ID", "Timestamp", "Value")) + for kpi in response.raw_kpi_lists: + cur_kpi_id = kpi.kpi_id.kpi_id.uuid + for i, raw_kpi in enumerate(kpi.raw_kpis): + print( + 
"{}\t{}\t{}\t{}".format( + i, + cur_kpi_id, + timestamp_float_to_string(raw_kpi.timestamp.timestamp), + raw_kpi.kpi_value.floatVal, + ) + ) + print("Last update:", datetime.datetime.now().strftime("%H:%M:%S")) + time.sleep(args.sleep) diff --git a/src/tests/hackfest3/probe/probe-tfs/.gitignore b/src/tests/hackfest3/probe/probe-tfs/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..dc8d7ee54c37fd887f19206592ed03a33118a59a --- /dev/null +++ b/src/tests/hackfest3/probe/probe-tfs/.gitignore @@ -0,0 +1,18 @@ +# Generated by Cargo +# will have compiled files and executables +/target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + + +# Added by cargo + +/target + +.env_bkp +.env diff --git a/src/tests/hackfest3/probe/probe-tfs/Cargo.toml b/src/tests/hackfest3/probe/probe-tfs/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..fb5db98bf5233e905d83b7f9fe06d44a71c3a0fd --- /dev/null +++ b/src/tests/hackfest3/probe/probe-tfs/Cargo.toml @@ -0,0 +1,39 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[package] +name = "rust-tfs" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dotenv = "0.15.0" +futures = "0.3.26" +prost = "0.11.6" +surge-ping = "0.7.3" +tokio = { version = "1.25", features = ["macros", "rt-multi-thread"] } +tonic = "0.8.3" + +[[bin]] +name = "tfsping" +path = "src/ping.rs" + +[[bin]] +name = "tfsagent" +path = "src/agent.rs" + +[build-dependencies] +tonic-build = "0.8.3" diff --git a/src/tests/hackfest3/probe/probe-tfs/LICENSE b/src/tests/hackfest3/probe/probe-tfs/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/src/tests/hackfest3/probe/probe-tfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/tests/hackfest3/probe/probe-tfs/README.md b/src/tests/hackfest3/probe/probe-tfs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f88d7c542dae22ad623797f43750e0589d2473cf --- /dev/null +++ b/src/tests/hackfest3/probe/probe-tfs/README.md @@ -0,0 +1,2 @@ +# rust-tfs +Client for TFS functionalities written in Rust. 
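+
+A minimal build sketch (assumptions: a Rust toolchain with the musl target is
+installed; these are the binary paths that `deploy.sh` copies into the mininet
+container):
+
+```
+rustup target add x86_64-unknown-linux-musl
+cargo build --release --target=x86_64-unknown-linux-musl
+# produces:
+#   target/x86_64-unknown-linux-musl/release/tfsping   (latency probe)
+#   target/x86_64-unknown-linux-musl/release/tfsagent  (reports results to Monitoring)
+```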
diff --git a/src/tests/hackfest3/probe/probe-tfs/build.rs b/src/tests/hackfest3/probe/probe-tfs/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..1dda249d16b3c571676a254f2178f772fb765c81 --- /dev/null +++ b/src/tests/hackfest3/probe/probe-tfs/build.rs @@ -0,0 +1,34 @@ +/** + * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Build script that generates Rust code for the protobuffers. + * + * Author: Carlos Natalino + */ + +fn main() { + tonic_build::configure() + .build_server(false) + .compile( + &[ + "proto/context.proto", + "proto/acl.proto", + "proto/kpi_sample_types.proto", + "proto/monitoring.proto", + ], + &["proto"], + ) + .unwrap_or_else(|e| panic!("Failed to compile protos {:?}", e)); +} diff --git a/src/tests/hackfest3/probe/probe-tfs/connect_to_mininet.sh b/src/tests/hackfest3/probe/probe-tfs/connect_to_mininet.sh new file mode 100755 index 0000000000000000000000000000000000000000..bba3eaa9a985f3e546f9df2681879faef0a9b83e --- /dev/null +++ b/src/tests/hackfest3/probe/probe-tfs/connect_to_mininet.sh @@ -0,0 +1,16 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +CONTAINER=`docker ps | grep mininet | cut -f1 -d" "` +docker exec -it $CONTAINER /bin/bash diff --git a/src/tests/hackfest3/probe/probe-tfs/deploy.sh b/src/tests/hackfest3/probe/probe-tfs/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..733f02d11ecd4a9de90898b210b2fe9b579447f2 --- /dev/null +++ b/src/tests/hackfest3/probe/probe-tfs/deploy.sh @@ -0,0 +1,37 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
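+
+# NOTE: this script assumes the TFS runtime variables are already loaded in the
+# current shell (e.g., via "source tfs_runtime_env_vars.sh"); the check below
+# aborts otherwise.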
+
+# build the software
+# uncomment the line below if you want to build it
+# cargo build --release --target=x86_64-unknown-linux-musl
+
+# build a .env file with the info from context and monitoring services
+
+if [ -z "${CONTEXTSERVICE_SERVICE_HOST}" ] || [ -z "${CONTEXTSERVICE_SERVICE_PORT_GRPC}" ] || \
+   [ -z "${MONITORINGSERVICE_SERVICE_HOST}" ] || [ -z "${MONITORINGSERVICE_SERVICE_PORT_GRPC}" ]
+then
+    echo "TFS_ENV_VARS are not loaded."
+    exit 1
+fi
+
+echo "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" > .env
+echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=${CONTEXTSERVICE_SERVICE_PORT_GRPC}" >> .env
+echo "MONITORINGSERVICE_SERVICE_HOST=${MONITORINGSERVICE_SERVICE_HOST}" >> .env
+echo "MONITORINGSERVICE_SERVICE_PORT_GRPC=${MONITORINGSERVICE_SERVICE_PORT_GRPC}" >> .env
+
+# get container id
+CONTAINER=`docker ps | grep mininet | cut -f1 -d" "`
+docker cp target/x86_64-unknown-linux-musl/release/tfsping $CONTAINER:/root
+docker cp target/x86_64-unknown-linux-musl/release/tfsagent $CONTAINER:/root
+docker cp .env $CONTAINER:/root
diff --git a/src/tests/hackfest3/probe/probe-tfs/proto b/src/tests/hackfest3/probe/probe-tfs/proto
new file mode 120000
index 0000000000000000000000000000000000000000..ce803d6a96f0064d107428238b9beecb2a0ed2be
--- /dev/null
+++ b/src/tests/hackfest3/probe/probe-tfs/proto
@@ -0,0 +1 @@
+../../../../../proto
\ No newline at end of file
diff --git a/src/tests/hackfest3/probe/probe-tfs/src/agent.rs b/src/tests/hackfest3/probe/probe-tfs/src/agent.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4221cbe28ba75021d2b7c2de6dbef46a043cc2bb
--- /dev/null
+++ b/src/tests/hackfest3/probe/probe-tfs/src/agent.rs
@@ -0,0 +1,254 @@
+/**
+ * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Program that receives ping results from the Unix socket and reports them to the Monitoring component.
+ *
+ * Author: Carlos Natalino
+ */
+
+/************** Modules needed to communicate with TeraFlowSDN ***************/
+pub mod kpi_sample_types {
+    tonic::include_proto!("kpi_sample_types");
+}
+
+pub mod acl {
+    tonic::include_proto!("acl");
+}
+
+pub mod context {
+    // tonic::include_proto!();
+    tonic::include_proto!("context");
+}
+
+pub mod monitoring {
+    tonic::include_proto!("monitoring");
+}
+
+/********************************** Imports **********************************/
+// standard library
+use std::env;
+use std::path::Path;
+use std::sync::Arc;
+use std::time::SystemTime;
+use std::{fs, io};
+
+// external libraries
+use dotenv::dotenv;
+use futures;
+use futures::lock::Mutex;
+use tokio::net::UnixListener;
+
+// proto
+use context::context_service_client::ContextServiceClient;
+use context::{Empty, Timestamp};
+use kpi_sample_types::KpiSampleType;
+use monitoring::monitoring_service_client::MonitoringServiceClient;
+use monitoring::{Kpi, KpiDescriptor, KpiId, KpiValue};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    dotenv().ok(); // load the environment variables from the .env file
+
+    let path = Path::new("/tmp/tfsping");
+
+    if path.exists() {
+        fs::remove_file(path)?; // removes the socket in case it exists
+    }
+
+    let listener = UnixListener::bind(path).unwrap();
+    println!("Bound to the path {:?}", path);
+
+    // ARC Mutex that tells whether or not to send the results to the monitoring component
+    let send_ping = Arc::new(Mutex::new(false));
+    // copy used by the task that receives data from the probe
+    let ping_probe = send_ping.clone();
+    // copy used by the task that receives stream data from TFS
+    let ping_trigger = send_ping.clone();
+
+    // ARC mutex that hosts the KPI ID to be used as the monitoring KPI
+    let kpi_id: Arc<Mutex<Option<KpiId>>> = Arc::new(Mutex::new(None));
+    let kpi_id_probe = kpi_id.clone();
+    let kpi_id_trigger = kpi_id.clone();
+
+    let t1 = tokio::spawn(async move {
+        let monitoring_host = env::var("MONITORINGSERVICE_SERVICE_HOST")
+            .unwrap_or_else(|_| panic!("receiver: Could not find monitoring host!"));
+        let monitoring_port = env::var("MONITORINGSERVICE_SERVICE_PORT_GRPC")
+            .unwrap_or_else(|_| panic!("receiver: Could not find monitoring port!"));
+
+        let mut monitoring_client = MonitoringServiceClient::connect(format!(
+            "http://{}:{}",
+            monitoring_host, monitoring_port
+        ))
+        .await
+        .unwrap();
+        println!("receiver: Connected to the monitoring service!");
+        loop {
+            println!("receiver: Awaiting for new connection!");
+            let (stream, _socket) = listener.accept().await.unwrap();
+
+            stream.readable().await.unwrap();
+
+            let mut buf = [0; 4];
+
+            match stream.try_read(&mut buf) {
+                Ok(n) => {
+                    let num = u32::from_be_bytes(buf);
+                    println!("receiver: read {} bytes -- {:?}", n, num);
+
+                    let should_ping = ping_probe.lock().await;
+
+                    if *should_ping {
+                        // only send the value to monitoring if needed
+                        // send the value to the monitoring component
+                        println!("receiver: Send value to monitoring");
+
+                        let kpi_id = kpi_id_probe.lock().await;
+                        println!("receiver: kpi id: {:?}", kpi_id);
+
+                        let now = SystemTime::now()
+                            .duration_since(SystemTime::UNIX_EPOCH)
+                            .unwrap()
+                            .as_secs(); // See struct std::time::Duration methods
+
+                        let kpi = Kpi {
+                            kpi_id: kpi_id.clone(),
+                            timestamp: Some(Timestamp {
+                                timestamp: now as f64,
+                            }),
+                            kpi_value: Some(KpiValue {
+                                value: Some(monitoring::kpi_value::Value::Int32Val(num as i32)),
+                            }),
+                        };
+                        // println!("Request: {:?}", kpi);
+                        let response = monitoring_client
+                            .include_kpi(tonic::Request::new(kpi))
+                            .await;
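+                        // Only check the response for errors (below) so that a
+                        // transient monitoring failure does not terminate the
+                        // receiver loop.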
// println!("Response: {:?}", response); + if response.is_err() { + println!("receiver: Issue with the response from monitoring!"); + } + } + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + continue; + } + Err(e) => { + println!("receiver: {:?}", e); + } + } + } + }); + + let t2 = tokio::spawn(async move { + // let server_address = "129.16.37.136"; + let context_host = env::var("CONTEXTSERVICE_SERVICE_HOST") + .unwrap_or_else(|_| panic!("stream: Could not find context host!")); + let context_port = env::var("CONTEXTSERVICE_SERVICE_PORT_GRPC") + .unwrap_or_else(|_| panic!("stream: Could not find context port!")); + + let monitoring_host = env::var("MONITORINGSERVICE_SERVICE_HOST") + .unwrap_or_else(|_| panic!("stream: Could not find monitoring host!")); + let monitoring_port = env::var("MONITORINGSERVICE_SERVICE_PORT_GRPC") + .unwrap_or_else(|_| panic!("stream: Could not find monitoring port!")); + + let mut context_client = + ContextServiceClient::connect(format!("http://{}:{}", context_host, context_port)) + .await + .unwrap(); + println!("stream: Connected to the context service!"); + + let mut monitoring_client = MonitoringServiceClient::connect(format!( + "http://{}:{}", + monitoring_host, monitoring_port + )) + .await + .unwrap(); + println!("stream: Connected to the monitoring service!"); + + let mut service_event_stream = context_client + .get_service_events(tonic::Request::new(Empty {})) + .await + .unwrap() + .into_inner(); + while let Some(event) = service_event_stream.message().await.unwrap() { + let event_service = event.clone().service_id.unwrap(); + if event.event.clone().unwrap().event_type == 1 { + println!("stream: New CREATE event:\n{:?}", event_service); + + let kpi_descriptor = KpiDescriptor { + kpi_id: None, + kpi_id_list: vec![], + device_id: None, + endpoint_id: None, + slice_id: None, + connection_id: None, + kpi_description: format!( + "Latency value for service {}", + event_service.service_uuid.unwrap().uuid + ), + service_id: Some(event.clone().service_id.clone().unwrap().clone()), + kpi_sample_type: KpiSampleType::KpisampletypeUnknown.into(), + }; + + let _response = monitoring_client + .set_kpi(tonic::Request::new(kpi_descriptor)) + .await + .unwrap() + .into_inner(); + let mut kpi_id = kpi_id_trigger.lock().await; + println!("stream: KPI ID: {:?}", _response); + *kpi_id = Some(_response.clone()); + let mut should_ping = ping_trigger.lock().await; + *should_ping = true; + } else if event.event.clone().unwrap().event_type == 3 { + println!("stream: New REMOVE event:\n{:?}", event); + let mut should_ping = ping_trigger.lock().await; + *should_ping = false; + } + } + }); + + futures::future::join_all(vec![t1, t2]).await; + + // let addr = "10.0.0.2".parse().unwrap(); + // let timeout = Duration::from_secs(1); + // ping::ping(addr, Some(timeout), Some(166), Some(3), Some(5), Some(&random())).unwrap(); + + // let server_address = env::var("CONTEXTSERVICE_SERVICE_HOST").unwrap(); + + // let contexts = grpc_client.list_context_ids(tonic::Request::new(Empty { })).await?; + + // println!("{:?}", contexts.into_inner()); + // let current_context = contexts.into_inner().context_ids[0].clone(); + + // if let Some(current_context) = contexts.into_inner().context_ids[0] { + + // } + // else { + // panic!("No context available!"); + // } + + // for context in contexts.into_inner().context_ids { + // println!("{:?}", context); + // } + + // let services = grpc_client.list_services(tonic::Request::new(current_context)).await?; + // println!("{:?}", 
services.into_inner());
+
+    println!("Hello, world!");
+
+    Ok(())
+}
diff --git a/src/tests/hackfest3/probe/probe-tfs/src/ping.rs b/src/tests/hackfest3/probe/probe-tfs/src/ping.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3c118c98782a4cb5def9a654edbe55186bbf3df7
--- /dev/null
+++ b/src/tests/hackfest3/probe/probe-tfs/src/ping.rs
@@ -0,0 +1,71 @@
+/**
+ * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Program that starts the ping probe and reports it to the Unix socket.
+ *
+ * Author: Carlos Natalino
+ */
+// standard library
+use std::io;
+use std::path::Path;
+
+// external libraries
+use tokio::net::UnixStream;
+use tokio::time::{sleep, Duration};
+
+async fn send_value(path: &Path, value: i32) -> Result<(), Box<dyn std::error::Error>> {
+    let stream = UnixStream::connect(path).await?;
+    stream.writable().await?;
+    // if ready.is_writable() {
+    match stream.try_write(&i32::to_be_bytes(value)) {
+        Ok(n) => {
+            println!("\twrite {} bytes\t{}", n, value);
+        }
+        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+            println!("Error would block!");
+        }
+        Err(e) => {
+            println!("error into()");
+            return Err(e.into());
+        }
+    }
+    // }
+    Ok(())
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let path = Path::new("/tmp/tfsping");
+
+    loop {
+        let payload = [0; 1024];
+
+        let result = surge_ping::ping("10.0.0.2".parse()?, &payload).await;
+
+        // let (_packet, duration) = result.unwra
+
+        if let Ok((_packet, duration)) = result {
+            println!("Ping took {:.3?}\t{:?}", duration, _packet.get_identifier());
+            send_value(&path, duration.as_micros() as i32).await?;
+        } else {
+            println!("Error!");
+            send_value(&path, -1).await?;
+        }
+
+        sleep(Duration::from_secs(2)).await;
+    }
+
+    // Ok(()) // unreachable
+}
diff --git a/src/tests/hackfest3/probe/probe-tfs/target/x86_64-unknown-linux-musl/release/tfsagent b/src/tests/hackfest3/probe/probe-tfs/target/x86_64-unknown-linux-musl/release/tfsagent
new file mode 100755
index 0000000000000000000000000000000000000000..b7cef11a433c6bf2eeb94638fa90d93f25acd3c8
Binary files /dev/null and b/src/tests/hackfest3/probe/probe-tfs/target/x86_64-unknown-linux-musl/release/tfsagent differ
diff --git a/src/tests/hackfest3/probe/probe-tfs/target/x86_64-unknown-linux-musl/release/tfsping b/src/tests/hackfest3/probe/probe-tfs/target/x86_64-unknown-linux-musl/release/tfsping
new file mode 100755
index 0000000000000000000000000000000000000000..6e943d292dd6653e857bf5eea3258d38ad246026
Binary files /dev/null and b/src/tests/hackfest3/probe/probe-tfs/target/x86_64-unknown-linux-musl/release/tfsping differ
diff --git a/src/tests/hackfest3/run_test_01_bootstrap.sh b/src/tests/hackfest3/run_test_01_bootstrap.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8eb7e75dc8d2d964b447bb4eb78b15c79a7634d7
--- /dev/null
+++ b/src/tests/hackfest3/run_test_01_bootstrap.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG
(https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# make sure to source the following scripts: +# - my_deploy.sh +# - tfs_runtime_env_vars.sh + +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/hackfest3/tests/test_functional_bootstrap.py + +# To enable debugging use the following options +#python -m pytest --verbose -o log_cli=true -o log_cli_level=DEBUG src/tests/hackfest3/tests/test_functional_bootstrap.py + diff --git a/src/tests/hackfest3/run_test_02_create_service.sh b/src/tests/hackfest3/run_test_02_create_service.sh new file mode 100755 index 0000000000000000000000000000000000000000..50eb0732b364d0f560aafceae79b73662279cf05 --- /dev/null +++ b/src/tests/hackfest3/run_test_02_create_service.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/hackfest3/tests/test_functional_create_service.py + +# To enable debugging use the following options +#python -m pytest --verbose -o log_cli=true -o log_cli_level=DEBUG src/tests/hackfest3/tests/test_functional_create_service.py diff --git a/src/tests/hackfest3/run_test_03_delete_service.sh b/src/tests/hackfest3/run_test_03_delete_service.sh new file mode 100755 index 0000000000000000000000000000000000000000..15f6e02c76ffccb70c01d35433133e36e896905c --- /dev/null +++ b/src/tests/hackfest3/run_test_03_delete_service.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
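+
+# Assumes my_deploy.sh has been sourced and TFS is deployed
+# (see the note in run_test_01_bootstrap.sh).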
+ +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/hackfest3/tests/test_functional_delete_service.py + +# To enable debugging use the following options +#python -m pytest --verbose -o log_cli=true -o log_cli_level=DEBUG src/tests/hackfest3/tests/test_functional_delete_service.py diff --git a/src/tests/hackfest3/run_test_04_cleanup.sh b/src/tests/hackfest3/run_test_04_cleanup.sh new file mode 100755 index 0000000000000000000000000000000000000000..db02155a80b002c01e896b580ee07caa65cb9db6 --- /dev/null +++ b/src/tests/hackfest3/run_test_04_cleanup.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/hackfest3/tests/test_functional_cleanup.py + +# To enable debugging use the following options +#python -m pytest --verbose -o log_cli=true -o log_cli_level=DEBUG src/tests/hackfest3/tests/test_functional_cleanup.py diff --git a/src/tests/hackfest3/setup.sh b/src/tests/hackfest3/setup.sh new file mode 100755 index 0000000000000000000000000000000000000000..0454f4b10286d05eeacc33c73c855906d0ab2c4f --- /dev/null +++ b/src/tests/hackfest3/setup.sh @@ -0,0 +1,23 @@ +#! /bin/bash +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}') + +kubectl exec ${POD_NAME} -n=tfs -c=server -- rm -rf /root/p4 +kubectl exec ${POD_NAME} -n=tfs -c=server -- mkdir /root/p4 + +kubectl cp src/tests/hackfest3/p4/p4info.txt tfs/${POD_NAME}:/root/p4 -c=server +kubectl cp src/tests/hackfest3/p4/bmv2.json tfs/${POD_NAME}:/root/p4 -c=server diff --git a/src/tests/hackfest3/tests/.gitignore b/src/tests/hackfest3/tests/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..76cb708d1b532c9b69166e55f36bcb912fd5e370 --- /dev/null +++ b/src/tests/hackfest3/tests/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. 
+Credentials.py diff --git a/src/tests/hackfest3/tests/BuildDescriptors.py b/src/tests/hackfest3/tests/BuildDescriptors.py new file mode 100644 index 0000000000000000000000000000000000000000..98b78863318a7ad682fc5f970d44d02240b45a26 --- /dev/null +++ b/src/tests/hackfest3/tests/BuildDescriptors.py @@ -0,0 +1,35 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, json, sys +from .Objects import CONTEXTS, DEVICES, LINKS, TOPOLOGIES + +def main(): + with open('tests/ofc22/descriptors_emulated.json', 'w', encoding='UTF-8') as f: + devices = [] + for device,connect_rules in DEVICES: + device = copy.deepcopy(device) + device['device_config']['config_rules'].extend(connect_rules) + devices.append(device) + + f.write(json.dumps({ + 'contexts': CONTEXTS, + 'topologies': TOPOLOGIES, + 'devices': devices, + 'links': LINKS + })) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/tests/hackfest3/tests/LoadDescriptors.py b/src/tests/hackfest3/tests/LoadDescriptors.py new file mode 100644 index 0000000000000000000000000000000000000000..b232935f4675d718d55e67fe3a76012a39398dda --- /dev/null +++ b/src/tests/hackfest3/tests/LoadDescriptors.py @@ -0,0 +1,40 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json, logging, sys +from common.Settings import get_setting +from context.client.ContextClient import ContextClient +from common.proto.context_pb2 import Context, Device, Link, Topology +from device.client.DeviceClient import DeviceClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def main(): + context_client = ContextClient( + get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + device_client = DeviceClient( + get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + + with open('tests/ofc22/descriptors.json', 'r', encoding='UTF-8') as f: + descriptors = json.loads(f.read()) + + for context in descriptors['contexts' ]: context_client.SetContext (Context (**context )) + for topology in descriptors['topologies']: context_client.SetTopology(Topology(**topology)) + for device in descriptors['devices' ]: device_client .AddDevice (Device (**device )) + for link in descriptors['links' ]: context_client.SetLink (Link (**link )) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/tests/hackfest3/tests/Objects.py b/src/tests/hackfest3/tests/Objects.py new file mode 100644 index 0000000000000000000000000000000000000000..942e1f4b12a9f4163d371ddbe5f87ac9f90055a9 --- /dev/null +++ b/src/tests/hackfest3/tests/Objects.py @@ -0,0 +1,291 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
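+
+# Topology sketch (assumed to mirror ./mininet/4switch2path.py): SW1 and SW4
+# are the edge switches (endpoint '3' of each faces a host) and two parallel
+# paths connect them, SW1-SW2-SW4 and SW1-SW3-SW4; every link is declared in
+# both directions below.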
+
+import os
+from typing import Dict, List, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Device import (
+    json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
+    json_device_emulated_tapi_disabled, json_device_id, json_device_p4_disabled,
+    json_device_packetrouter_disabled, json_device_tapi_disabled)
+from common.tools.object_factory.Service import (
+    get_service_uuid, json_service_l3nm_planned, json_service_p4_planned)
+from common.tools.object_factory.ConfigRule import (
+    json_config_rule_set, json_config_rule_delete)
+from common.tools.object_factory.EndPoint import (
+    json_endpoint, json_endpoint_descriptor, json_endpoint_id, json_endpoint_ids, json_endpoints)
+from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
+from common.tools.object_factory.Topology import json_topology, json_topology_id
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
+
+# ----- Topology -------------------------------------------------------------------------------------------------------
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+
+# ----- Monitoring Samples ---------------------------------------------------------------------------------------------
+PACKET_PORT_SAMPLE_TYPES = [
+    KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED,
+    KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED,
+    KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED,
+    KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED,
+]
+
+# ----- Device Credentials and Settings --------------------------------------------------------------------------------
+
+
+# ----- Devices --------------------------------------------------------------------------------------------------------
+
+CUR_PATH = os.path.dirname(os.path.abspath(__file__))
+
+DEVICE_SW1_UUID    = 'SW1'
+DEVICE_SW1_TIMEOUT = 60
+DEVICE_SW1_ID      = json_device_id(DEVICE_SW1_UUID)
+DEVICE_SW1         = json_device_p4_disabled(DEVICE_SW1_UUID)
+
+DEVICE_SW1_DPID    = 1
+DEVICE_SW1_NAME    = DEVICE_SW1_UUID
+DEVICE_SW1_IP_ADDR = '192.168.6.38'
+DEVICE_SW1_PORT    = '50001'
+DEVICE_SW1_VENDOR  = 'Open Networking Foundation'
+DEVICE_SW1_HW_VER  = 'BMv2 simple_switch'
+DEVICE_SW1_SW_VER  = 'Stratum'
+
+DEVICE_SW1_BIN_PATH  = '/root/p4/bmv2.json'
+DEVICE_SW1_INFO_PATH = '/root/p4/p4info.txt'
+
+DEVICE_SW1_ENDPOINT_DEFS = [json_endpoint_descriptor('1', 'port'),
+                            json_endpoint_descriptor('2', 'port'),
+                            json_endpoint_descriptor('3', 'port')]
+DEVICE_SW1_ENDPOINTS     = json_endpoints(DEVICE_SW1_ID, DEVICE_SW1_ENDPOINT_DEFS)
+DEVICE_SW1_ENDPOINT_IDS  = json_endpoint_ids(DEVICE_SW1_ID, DEVICE_SW1_ENDPOINT_DEFS)
+ENDPOINT_ID_SW1_1        = DEVICE_SW1_ENDPOINTS[0]['endpoint_id']
+ENDPOINT_ID_SW1_2        = DEVICE_SW1_ENDPOINTS[1]['endpoint_id']
+ENDPOINT_ID_SW1_3        = DEVICE_SW1_ENDPOINTS[2]['endpoint_id']
+
+DEVICE_SW1_CONNECT_RULES = json_device_connect_rules(
+    DEVICE_SW1_IP_ADDR,
+    DEVICE_SW1_PORT,
+    {
+        'id':      DEVICE_SW1_DPID,
+        'name':    DEVICE_SW1_NAME,
+        'vendor':  DEVICE_SW1_VENDOR,
+        'hw_ver':
DEVICE_SW1_HW_VER, + 'sw_ver': DEVICE_SW1_SW_VER, + 'timeout': DEVICE_SW1_TIMEOUT, + 'p4bin': DEVICE_SW1_BIN_PATH, + 'p4info': DEVICE_SW1_INFO_PATH + } +) + +DEVICE_SW2_UUID = 'SW2' +DEVICE_SW2_TIMEOUT = 60 +DEVICE_SW2_ID = json_device_id(DEVICE_SW2_UUID) +DEVICE_SW2 = json_device_p4_disabled(DEVICE_SW2_UUID) + +DEVICE_SW2_DPID = 1 +DEVICE_SW2_NAME = DEVICE_SW2_UUID +DEVICE_SW2_IP_ADDR = '192.168.6.38' +DEVICE_SW2_PORT = '50002' +DEVICE_SW2_VENDOR = 'Open Networking Foundation' +DEVICE_SW2_HW_VER = 'BMv2 simple_switch' +DEVICE_SW2_SW_VER = 'Stratum' + +DEVICE_SW2_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW2_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW2_ENDPOINT_DEFS = [json_endpoint_descriptor('1', 'port'), + json_endpoint_descriptor('2', 'port')] +DEVICE_SW2_ENDPOINTS = json_endpoints(DEVICE_SW2_ID, DEVICE_SW2_ENDPOINT_DEFS) +DEVICE_SW2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW2_ID, DEVICE_SW2_ENDPOINT_DEFS) +ENDPOINT_ID_SW2_1 = DEVICE_SW2_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW2_2 = DEVICE_SW2_ENDPOINTS[1]['endpoint_id'] + +DEVICE_SW2_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW2_IP_ADDR, + DEVICE_SW2_PORT, + { + 'id': DEVICE_SW2_DPID, + 'name': DEVICE_SW2_NAME, + 'vendor': DEVICE_SW2_VENDOR, + 'hw_ver': DEVICE_SW2_HW_VER, + 'sw_ver': DEVICE_SW2_SW_VER, + 'timeout': DEVICE_SW2_TIMEOUT, + 'p4bin': DEVICE_SW2_BIN_PATH, + 'p4info': DEVICE_SW2_INFO_PATH + } +) + +DEVICE_SW3_UUID = 'SW3' +DEVICE_SW3_TIMEOUT = 60 +DEVICE_SW3_ID = json_device_id(DEVICE_SW3_UUID) +DEVICE_SW3 = json_device_p4_disabled(DEVICE_SW3_UUID) + +DEVICE_SW3_DPID = 1 +DEVICE_SW3_NAME = DEVICE_SW3_UUID +DEVICE_SW3_IP_ADDR = '192.168.6.38' +DEVICE_SW3_PORT = '50003' +DEVICE_SW3_VENDOR = 'Open Networking Foundation' +DEVICE_SW3_HW_VER = 'BMv2 simple_switch' +DEVICE_SW3_SW_VER = 'Stratum' + +DEVICE_SW3_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW3_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW3_ENDPOINT_DEFS = [json_endpoint_descriptor('1', 'port'), + json_endpoint_descriptor('2', 'port')] +DEVICE_SW3_ENDPOINTS = json_endpoints(DEVICE_SW3_ID, DEVICE_SW3_ENDPOINT_DEFS) +DEVICE_SW3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW3_ID, DEVICE_SW3_ENDPOINT_DEFS) +ENDPOINT_ID_SW3_1 = DEVICE_SW3_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW3_2 = DEVICE_SW3_ENDPOINTS[1]['endpoint_id'] + +DEVICE_SW3_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW3_IP_ADDR, + DEVICE_SW3_PORT, + { + 'id': DEVICE_SW3_DPID, + 'name': DEVICE_SW3_NAME, + 'vendor': DEVICE_SW3_VENDOR, + 'hw_ver': DEVICE_SW3_HW_VER, + 'sw_ver': DEVICE_SW3_SW_VER, + 'timeout': DEVICE_SW3_TIMEOUT, + 'p4bin': DEVICE_SW3_BIN_PATH, + 'p4info': DEVICE_SW3_INFO_PATH + } +) + +DEVICE_SW4_UUID = 'SW4' +DEVICE_SW4_TIMEOUT = 60 +DEVICE_SW4_ID = json_device_id(DEVICE_SW4_UUID) +DEVICE_SW4 = json_device_p4_disabled(DEVICE_SW4_UUID) + +DEVICE_SW4_DPID = 1 +DEVICE_SW4_NAME = DEVICE_SW4_UUID +DEVICE_SW4_IP_ADDR = '192.168.6.38' +DEVICE_SW4_PORT = '50004' +DEVICE_SW4_VENDOR = 'Open Networking Foundation' +DEVICE_SW4_HW_VER = 'BMv2 simple_switch' +DEVICE_SW4_SW_VER = 'Stratum' + +DEVICE_SW4_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW4_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW4_ENDPOINT_DEFS = [json_endpoint_descriptor('1', 'port'), + json_endpoint_descriptor('2', 'port'), + json_endpoint_descriptor('3', 'port')] +DEVICE_SW4_ENDPOINTS = json_endpoints(DEVICE_SW4_ID, DEVICE_SW4_ENDPOINT_DEFS) +DEVICE_SW4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW4_ID, DEVICE_SW4_ENDPOINT_DEFS) +ENDPOINT_ID_SW4_1 = DEVICE_SW4_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW4_2 = 
DEVICE_SW4_ENDPOINTS[1]['endpoint_id'] +ENDPOINT_ID_SW4_3 = DEVICE_SW4_ENDPOINTS[2]['endpoint_id'] + +DEVICE_SW4_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW4_IP_ADDR, + DEVICE_SW4_PORT, + { + 'id': DEVICE_SW4_DPID, + 'name': DEVICE_SW4_NAME, + 'vendor': DEVICE_SW4_VENDOR, + 'hw_ver': DEVICE_SW4_HW_VER, + 'sw_ver': DEVICE_SW4_SW_VER, + 'timeout': DEVICE_SW4_TIMEOUT, + 'p4bin': DEVICE_SW4_BIN_PATH, + 'p4info': DEVICE_SW4_INFO_PATH + } +) + +# ----- Links ---------------------------------------------------------------------------------------------------------- + +# Leftmost links +# SW1_1 - SW2_1 +LINK_SW1_SW2_UUID = get_link_uuid(ENDPOINT_ID_SW1_1, ENDPOINT_ID_SW2_1) +LINK_SW1_SW2_ID = json_link_id(LINK_SW1_SW2_UUID) +LINK_SW1_SW2 = json_link(LINK_SW1_SW2_UUID, [ENDPOINT_ID_SW1_1, ENDPOINT_ID_SW2_1]) + +# SW2_1 - SW1_1 +LINK_SW2_SW1_UUID = get_link_uuid(ENDPOINT_ID_SW2_1, ENDPOINT_ID_SW1_1) +LINK_SW2_SW1_ID = json_link_id(LINK_SW2_SW1_UUID) +LINK_SW2_SW1 = json_link(LINK_SW2_SW1_UUID, [ENDPOINT_ID_SW2_1, ENDPOINT_ID_SW1_1]) + +# SW1_2 - SW3_1 +LINK_SW1_SW3_UUID = get_link_uuid(ENDPOINT_ID_SW1_2, ENDPOINT_ID_SW3_1) +LINK_SW1_SW3_ID = json_link_id(LINK_SW1_SW3_UUID) +LINK_SW1_SW3 = json_link(LINK_SW1_SW3_UUID, [ENDPOINT_ID_SW1_2, ENDPOINT_ID_SW3_1]) + +# SW3_1 - SW1_2 +LINK_SW3_SW1_UUID = get_link_uuid(ENDPOINT_ID_SW3_1, ENDPOINT_ID_SW1_2) +LINK_SW3_SW1_ID = json_link_id(LINK_SW3_SW1_UUID) +LINK_SW3_SW1 = json_link(LINK_SW3_SW1_UUID, [ENDPOINT_ID_SW3_1, ENDPOINT_ID_SW1_2]) + + +# Rightmost links +# SW2_2 - SW4_1 +LINK_SW2_SW4_UUID = get_link_uuid(ENDPOINT_ID_SW2_2, ENDPOINT_ID_SW4_1) +LINK_SW2_SW4_ID = json_link_id(LINK_SW2_SW4_UUID) +LINK_SW2_SW4 = json_link(LINK_SW2_SW4_UUID, [ENDPOINT_ID_SW2_2, ENDPOINT_ID_SW4_1]) + +# SW4_1 - SW2_2 +LINK_SW4_SW2_UUID = get_link_uuid(ENDPOINT_ID_SW4_1, ENDPOINT_ID_SW2_2) +LINK_SW4_SW2_ID = json_link_id(LINK_SW4_SW2_UUID) +LINK_SW4_SW2 = json_link(LINK_SW4_SW2_UUID, [ENDPOINT_ID_SW4_1, ENDPOINT_ID_SW2_2]) + +# SW3_2 - SW4_2 +LINK_SW3_SW4_UUID = get_link_uuid(ENDPOINT_ID_SW3_2, ENDPOINT_ID_SW4_2) +LINK_SW3_SW4_ID = json_link_id(LINK_SW3_SW4_UUID) +LINK_SW3_SW4 = json_link(LINK_SW3_SW4_UUID, [ENDPOINT_ID_SW3_2, ENDPOINT_ID_SW4_2]) + +# SW4_2 - SW3_2 +LINK_SW4_SW3_UUID = get_link_uuid(ENDPOINT_ID_SW4_2, ENDPOINT_ID_SW3_2) +LINK_SW4_SW3_ID = json_link_id(LINK_SW4_SW3_UUID) +LINK_SW4_SW3 = json_link(LINK_SW4_SW3_UUID, [ENDPOINT_ID_SW4_2, ENDPOINT_ID_SW3_2]) + +# ----- Service ---------------------------------------------------------------------------------------------------------- + +SERVICE_SW1_SW4_UUID = get_service_uuid(ENDPOINT_ID_SW1_3, ENDPOINT_ID_SW4_3) +SERVICE_SW1_SW4 = json_service_p4_planned(SERVICE_SW1_SW4_UUID) +SERVICE_SW1_SW4_ENDPOINT_IDS = [DEVICE_SW1_ENDPOINT_IDS[2], DEVICE_SW4_ENDPOINT_IDS[2]] + +# ----- Object Collections --------------------------------------------------------------------------------------------- + +CONTEXTS = [CONTEXT] +TOPOLOGIES = [TOPOLOGY] + +DEVICES = [ + (DEVICE_SW1, DEVICE_SW1_CONNECT_RULES, DEVICE_SW1_ENDPOINTS), + (DEVICE_SW2, DEVICE_SW2_CONNECT_RULES, DEVICE_SW2_ENDPOINTS), + (DEVICE_SW3, DEVICE_SW3_CONNECT_RULES, DEVICE_SW3_ENDPOINTS), + (DEVICE_SW4, DEVICE_SW4_CONNECT_RULES, DEVICE_SW4_ENDPOINTS), +] + +LINKS = [ + LINK_SW1_SW2, + LINK_SW2_SW1, + + LINK_SW1_SW3, + LINK_SW3_SW1, + + LINK_SW2_SW4, + LINK_SW4_SW2, + + LINK_SW3_SW4, + LINK_SW4_SW3, +] + +SERVICES = [ + (SERVICE_SW1_SW4, SERVICE_SW1_SW4_ENDPOINT_IDS), +] diff --git a/src/tests/hackfest3/tests/__init__.py 
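Objects.py repeats an identical block for each of the four switches. When adapting the file to a new topology, a small helper can generate those blocks; `make_switch` below is a hypothetical sketch built on the same object-factory functions the file already imports, not part of the file itself.

```
# Hypothetical helper condensing the per-switch boilerplate in Objects.py;
# a sketch, assuming the same object_factory functions shown above.
from common.tools.object_factory.Device import (
    json_device_connect_rules, json_device_id, json_device_p4_disabled)
from common.tools.object_factory.EndPoint import (
    json_endpoint_descriptor, json_endpoint_ids, json_endpoints)

def make_switch(uuid, grpc_port, num_ports, ip_addr='192.168.6.38', dpid=1):
    device_id     = json_device_id(uuid)
    endpoint_defs = [json_endpoint_descriptor(str(p), 'port') for p in range(1, num_ports + 1)]
    connect_rules = json_device_connect_rules(ip_addr, grpc_port, {
        'id': dpid, 'name': uuid,
        'vendor': 'Open Networking Foundation',
        'hw_ver': 'BMv2 simple_switch', 'sw_ver': 'Stratum',
        'timeout': 60, 'p4bin': '/root/p4/bmv2.json', 'p4info': '/root/p4/p4info.txt',
    })
    return (
        json_device_p4_disabled(uuid),                # device object
        connect_rules,                                # SBI connection config rules
        json_endpoints(device_id, endpoint_defs),     # endpoint objects
        json_endpoint_ids(device_id, endpoint_defs),  # endpoint ids, used to build links
    )

# e.g., a fifth switch with two ports:
# DEVICE_SW5, DEVICE_SW5_CONNECT_RULES, DEVICE_SW5_ENDPOINTS, DEVICE_SW5_ENDPOINT_IDS = \
#     make_switch('SW5', '50005', 2)
```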
diff --git a/src/tests/hackfest3/tests/__init__.py b/src/tests/hackfest3/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/tests/hackfest3/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/hackfest3/tests/test_functional_bootstrap.py b/src/tests/hackfest3/tests/test_functional_bootstrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..97269217336986a6a143a4a7ef94bd8b0710e9b0
--- /dev/null
+++ b/src/tests/hackfest3/tests/test_functional_bootstrap.py
@@ -0,0 +1,119 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import ConfigActionEnum, Context, ContextId, Device, Empty, Link, Topology, DeviceOperationalStatusEnum
+from device.client.DeviceClient import DeviceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+
+from common.tools.object_factory.ConfigRule import (
+    json_config_rule_set, json_config_rule_delete)
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+def test_prepare_scenario(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Create Contexts and Topologies -----------------------------------------------------------------------------
+    for context in CONTEXTS:
+        context_uuid = context['context_id']['context_uuid']['uuid']
+        LOGGER.info('Adding Context {:s}'.format(context_uuid))
+        response = context_client.SetContext(Context(**context))
+        context_data = context_client.GetContext(response)
+        assert context_data.name == context_uuid
+
+    for topology in TOPOLOGIES:
+        context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
+        topology_uuid = topology['topology_id']['topology_uuid']['uuid']
+        LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
+        response = context_client.SetTopology(Topology(**topology))
+        # assert response.context_id.context_uuid.uuid == context_uuid
+
+        topology_data = context_client.GetTopology(response)
+        assert topology_data.name == topology_uuid
+        context_id = json_context_id(context_uuid)
+
+
+def test_scenario_ready(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+def test_devices_bootstrapping(
+    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Create Devices ---------------------------------------------------------------
+    for device, connect_rules, endpoints in DEVICES:
+        device_uuid = device['device_id']['device_uuid']['uuid']
+        LOGGER.info('Adding Device {:s}'.format(device_uuid))
+
+        device_p4_with_connect_rules = copy.deepcopy(device)
+        device_p4_with_connect_rules['device_config']['config_rules'].extend(connect_rules)
+        device_p4_with_connect_rules['device_operational_status'] = \
+            DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+        response = device_client.AddDevice(Device(**device_p4_with_connect_rules))
+
+        LOGGER.info('Configuring Device {:s}'.format(device_uuid))
+        device_p4_with_endpoints = copy.deepcopy(device)
+        device_p4_with_endpoints['device_id']['device_uuid']['uuid'] = response.device_uuid.uuid
+        device_p4_with_endpoints['device_endpoints'].extend(endpoints)
+        for i in device_p4_with_endpoints['device_endpoints']:
+            i['endpoint_id']['device_id']['device_uuid']['uuid'] = response.device_uuid.uuid
+
+        LOGGER.info('Adding Endpoints {:s}'.format(device_uuid))
+        device_client.ConfigureDevice(Device(**device_p4_with_endpoints))
+
+    for link in LINKS:
+        link_uuid = link['link_id']['link_uuid']['uuid']
+        LOGGER.info('Adding Link {:s}'.format(link_uuid))
+        response = context_client.SetLink(Link(**link))
+
+def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure devices are created -----------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
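Device onboarding goes through the SBI service, so a count assertion issued immediately after `AddDevice` can race against the registration. Below is a sketch of a polling helper for such eventually-consistent checks; `wait_for_devices` is hypothetical and not part of the test files, but it reuses the same `ListDevices`/`Empty` calls the tests already make.

```
# Sketch of a retry helper for eventually-consistent bootstrap checks;
# wait_for_devices is hypothetical, built on the ContextClient API used above.
import time
from common.proto.context_pb2 import Empty
from context.client.ContextClient import ContextClient

def wait_for_devices(context_client: ContextClient, expected: int,
                     timeout_s: float = 30.0, interval_s: float = 1.0) -> bool:
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        response = context_client.ListDevices(Empty())
        if len(response.devices) >= expected:
            return True
        time.sleep(interval_s)  # give the SBI service time to finish onboarding
    return False
```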
diff --git a/src/tests/hackfest3/tests/test_functional_cleanup.py b/src/tests/hackfest3/tests/test_functional_cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a87649a0355256bd6b82163a33e84147bfe8438
--- /dev/null
+++ b/src/tests/hackfest3/tests/test_functional_cleanup.py
@@ -0,0 +1,82 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_REMOVE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, Link, LinkId, TopologyId, DeviceOperationalStatusEnum
+from device.client.DeviceClient import DeviceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+def test_scenario_cleanup(
+    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Delete Links ------------------------------------------------------------------------------------------------
+    for link in LINKS:
+        link_uuid = link['link_id']['link_uuid']['uuid']
+        LOGGER.info('Removing Link {:s}'.format(link_uuid))
+        link_id = link['link_id']
+        context_client.RemoveLink(LinkId(**link_id))
+
+    # ----- Delete Devices and Validate Collected Events ---------------------------------------------------------------
+    for device, _, _ in DEVICES:
+        device_id = device['device_id']
+        device_uuid = device_id['device_uuid']['uuid']
+        LOGGER.info('Deleting Device {:s}'.format(device_uuid))
+        #device_client.DeleteDevice(DeviceId(**device_id))
+        context_client.RemoveDevice(DeviceId(**device_id))
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+
+    # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------
+    for topology in TOPOLOGIES:
+        topology_id = topology['topology_id']
+        context_uuid = topology_id['context_id']['context_uuid']['uuid']
+        topology_uuid = topology_id['topology_uuid']['uuid']
+        LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
+        context_client.RemoveTopology(TopologyId(**topology_id))
+        context_id = json_context_id(context_uuid)
+
+    # ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
+    for context in CONTEXTS:
+        context_id = context['context_id']
+        context_uuid = context_id['context_uuid']['uuid']
+        LOGGER.info('Deleting Context {:s}'.format(context_uuid))
+        context_client.RemoveContext(ContextId(**context_id))
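The cleanup test imports `EventsCollector` and `EVENT_REMOVE` but never uses them, although its section comments mention validating collected events. Below is a sketch of what that validation might look like; it assumes the `start()`/`get_events()`/`stop()` interface that `EventsCollector` exposes elsewhere in TFS, and the event shape is likewise an assumption.

```
# Sketch of the event validation the section comments allude to; the
# EventsCollector interface (start/get_events/stop) and the event shape
# are assumptions, not confirmed by the test file above.
from common.tests.EventTools import EVENT_REMOVE
from context.client.EventsCollector import EventsCollector

def validate_remove_events(context_client, num_removals):
    events_collector = EventsCollector(context_client)
    events_collector.start()
    # ... perform the RemoveDevice/RemoveLink calls here ...
    events = events_collector.get_events(block=True, count=num_removals)
    for event in events:
        assert event.event.event_type == EVENT_REMOVE  # event shape assumed
    events_collector.stop()
```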
diff --git a/src/tests/hackfest3/tests/test_functional_create_service.py b/src/tests/hackfest3/tests/test_functional_create_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c0a5049e0cf76e937512388ea1a9eb36b126e36
--- /dev/null
+++ b/src/tests/hackfest3/tests/test_functional_create_service.py
@@ -0,0 +1,65 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Service import json_service_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import (
+    ConfigActionEnum, Context, ContextId, Device, DeviceId, DeviceOperationalStatusEnum, Empty, Link, Service,
+    ServiceId, Topology)
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, SERVICES
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def service_client():
+    _client = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+def test_rules_entry(
+    context_client : ContextClient, device_client : DeviceClient, service_client : ServiceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Create Services --------------------------------------------------------------
+    for service, endpoints in SERVICES:
+        # Insert Service (table entries)
+        service_uuid = service['service_id']['service_uuid']['uuid']
+        LOGGER.info('Creating Service {:s}'.format(service_uuid))
+        service_p4 = copy.deepcopy(service)
+        # Create the service without endpoints first; the update then attaches
+        # the endpoints so the Service component can compute the path and
+        # install the corresponding P4 table entries.
+        service_client.CreateService(Service(**service_p4))
+        service_p4['service_endpoint_ids'].extend(endpoints)
+        service_client.UpdateService(Service(**service_p4))
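A natural follow-up is to check that the service actually landed in the Context database. The sketch below is hypothetical, not part of the test file; it assumes `ListServices` accepts the context id, as in other TFS functional tests.

```
# Hypothetical post-creation check; assumes ContextClient.ListServices(ContextId)
# as used in other TFS functional tests.
from common.proto.context_pb2 import ContextId
from .Objects import CONTEXT_ID, SERVICES

def test_services_created(context_client):  # reuses the session-scoped fixture
    response = context_client.ListServices(ContextId(**CONTEXT_ID))
    assert len(response.services) == len(SERVICES)
```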
diff --git a/src/tests/hackfest3/tests/test_functional_delete_service.py b/src/tests/hackfest3/tests/test_functional_delete_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5821df4ccc1caa2a1d72ed98dbfcb82e9db21b1
--- /dev/null
+++ b/src/tests/hackfest3/tests/test_functional_delete_service.py
@@ -0,0 +1,62 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_REMOVE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Service import json_service_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, LinkId, TopologyId, Service, ServiceId, DeviceOperationalStatusEnum
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, SERVICES
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def service_client():
+    _client = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+def test_rules_delete(
+    context_client : ContextClient, device_client : DeviceClient, service_client : ServiceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Delete Services --------------------------------------------------------------
+    for service, _ in SERVICES:
+        # Delete Service (removes the installed table entries)
+        service_uuid = service['service_id']['service_uuid']['uuid']
+        LOGGER.info('Deleting Service {:s}'.format(service_uuid))
+        response = service_client.DeleteService(ServiceId(**json_service_id(service_uuid, CONTEXT_ID)))
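Symmetrically, a post-deletion check can confirm that no services remain before running the cleanup step. The sketch below mirrors the creation-side check and rests on the same assumed `ListServices` call; it is illustrative, not part of the diff.

```
# Hypothetical post-deletion check, mirroring the creation-side sketch;
# assumes the same ContextClient.ListServices(ContextId) call.
from common.proto.context_pb2 import ContextId
from .Objects import CONTEXT_ID

def test_services_deleted(context_client):  # reuses the session-scoped fixture
    response = context_client.ListServices(ContextId(**CONTEXT_ID))
    assert len(response.services) == 0
```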