diff --git a/.gitignore b/.gitignore index 56f7580de26e47f75f8bf16346b35f35e229491d..e0f8501490a85015a57c7280aeba872fcb2c0692 100644 --- a/.gitignore +++ b/.gitignore @@ -53,6 +53,7 @@ coverage.xml .pytest_cache/ .benchmarks/ cover/ +*_report.xml # Translations *.mo diff --git a/proto/context.proto b/proto/context.proto index ab7dd5e699a9816daf3e3a180fffc6bfde0a8103..f5dec30796a8426f512947d369b8db5f5889471a 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -190,6 +190,7 @@ message DeviceList { message DeviceEvent { Event event = 1; DeviceId device_id = 2; + DeviceConfig device_config = 3; } diff --git a/run_tests_docker.sh b/run_tests_docker.sh new file mode 100755 index 0000000000000000000000000000000000000000..fd885140999ac0f045c162f361f0075af96a8d48 --- /dev/null +++ b/run_tests_docker.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# Set the URL of your local Docker registry where the images will be uploaded to. Leave it blank if you do not want to +# use any Docker registry. +REGISTRY_IMAGE="" +#REGISTRY_IMAGE="http://my-container-registry.local/" + +# Set the list of components you want to build images for, and deploy. +COMPONENTS="context device automation policy service compute monitoring centralizedattackdetector" + +# Set the tag you want to use for your images. +IMAGE_TAG="tf-dev" + +# Constants +TMP_FOLDER="./tmp" + +TMP_LOGS_FOLDER="$TMP_FOLDER/logs" +mkdir -p $TMP_LOGS_FOLDER + +for COMPONENT in $COMPONENTS; do + echo "Processing '$COMPONENT' component..." + IMAGE_NAME="$COMPONENT:$IMAGE_TAG" + IMAGE_URL="$REGISTRY_IMAGE/$IMAGE_NAME" + + echo " Building Docker image..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + else + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG" + fi + + if [ -n "$REGISTRY_IMAGE" ]; then + echo "Pushing Docker image to '$REGISTRY_IMAGE'..." + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi +done + +echo "Preparing for running the tests..." 
+ +if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + +for COMPONENT in $COMPONENTS; do +    IMAGE_NAME="$COMPONENT:$IMAGE_TAG" +    echo "  Running tests for $COMPONENT:" +    docker run -it -d --name $COMPONENT --network=teraflowbridge $IMAGE_NAME +    docker exec -it $COMPONENT bash -c "pytest --log-level=DEBUG --verbose $COMPONENT/tests/test_unitary.py" +    docker stop $COMPONENT +done diff --git a/scripts/build_run_report_tests_locally.sh b/scripts/build_run_report_tests_locally.sh new file mode 100755 index 0000000000000000000000000000000000000000..9bdc81d9894df35a6bcc325d78e7f1f5214e8a96 --- /dev/null +++ b/scripts/build_run_report_tests_locally.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#      http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +die () { +    echo >&2 "$@" +    exit 1 +} + +[ "$#" -eq 1 ] || die "component name required but not provided" + +COMPONENT_NAME=$1 # parameter +IMAGE_NAME="${COMPONENT_NAME}-local" +IMAGE_TAG="latest" + +if docker ps | grep $IMAGE_NAME +then +    docker stop $IMAGE_NAME +fi + +if docker network list | grep teraflowbridge +then +    echo "teraflowbridge is already created" +else +    docker network create -d bridge teraflowbridge +fi + +docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$COMPONENT_NAME/Dockerfile . + +docker run --name $IMAGE_NAME -d -v "${PWD}/src/${COMPONENT_NAME}/tests:/home/${COMPONENT_NAME}/results" --network=teraflowbridge --rm $IMAGE_NAME:$IMAGE_TAG + +docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $COMPONENT_NAME/tests/ --junitxml=/home/${COMPONENT_NAME}/results/${COMPONENT_NAME}_report.xml" + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc + +echo +echo "Coverage report:" +echo "----------------" +docker exec -i $IMAGE_NAME bash -c "coverage report --include='${COMPONENT_NAME}/*' --show-missing" + +# docker stop $IMAGE_NAME +docker rm -f $IMAGE_NAME +docker network rm teraflowbridge diff --git a/src/automation/README.md b/src/automation/README.md index 099980bcc4172bf9e5c2d59459f40ae4331696cf..e98d2b8ab62563f43cf2c1011e91fb2a1d08d378 100644 --- a/src/automation/README.md +++ b/src/automation/README.md @@ -1,28 +1,57 @@ -# Automation TeraFlow OS service +# TeraFlowSDN Automation service -The Automation service, also known as Zero-Touch Provisioning (ZTP), is tested on Ubuntu 20.04. Follow the instructions below to build, test, and run this service on your local environment. +This repository hosts the TeraFlowSDN Automation service, also known as the Zero-Touch Provisioning (ZTP) service. +Follow the instructions below to build, test, and run this service on your local environment.
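A minimal usage sketch for the two test helper scripts added above, assuming they are invoked from the repository root; `context` is just an example component name, and any component with a Dockerfile under `src/<component>/` should work the same way:

```bash
# Build the image for one component, run its tests inside a container attached
# to the teraflowbridge network, and collect coverage plus a JUnit XML report.
# The report ends up at src/context/tests/context_report.xml on the host,
# because that folder is bind-mounted as the container's results directory;
# the new *_report.xml rule in .gitignore keeps it out of version control.
./scripts/build_run_report_tests_locally.sh context

# Build the images for every component listed in COMPONENTS and run their
# unitary tests (tests/test_unitary.py) one after another.
./run_tests_docker.sh
```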
-## Automation Teraflow OS service architecture +## TeraFlowSDN Automation service architecture -| The Automation Teraflow OS service architecture consists of six (6) interfaces listed below: | +The TeraFlowSDN Automation architecture consists of six (6) interfaces listed below: + +Interfaces | |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 1. The `AutomationGateway` interface that implements all the rpc functions that are described in `automation.proto` file. | -| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client and implements all the rpc functions that are described in `context.proto` file. | -| 3. The `DeviceGateway` interface that communicates with a `Device` Service gRPC client and implements all the rpc functions that are described in `device.proto` file. | -| 4. The `AutomationService` interface that implements the `addDevice()` method by communicating with a `Context` gRPC client & a `Device` gRPC client through the use of `ContextService` interface & `DeviceService` interface respectively. | -| 5. The `ContextService` interface that implements the `getDevice()` & `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface. | -| 6. The `DeviceService` interface that implements the `getInitialConfiguration()` & `configureDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface. | +| 1. The `AutomationGateway` interface that implements all the RPC functions that are described in `automation.proto` file. | +| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client to invoke key RPC functions described in `context.proto` file. | +| 3. The `DeviceGateway` interface that communicates with a `Device` Service gRPC client to invoke key RPC functions described in `device.proto` file. | +| 4. The `AutomationService` interface that implements the `addDevice()`, `updateDevice()`, and `deleteDevice()` methods by communicating with a `Context` gRPC client and a `Device` gRPC client through the use of `ContextService` interface and `DeviceService` interface respectively. | +| 5. The `ContextService` interface that implements the `getDevice()` and `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface. | +| 6. The `DeviceService` interface that implements the `getInitialConfiguration()`, `configureDevice()`, and `deleteDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface. | + + +## Prerequisites +The Automation service is currently tested against Ubuntu 20.04 and Java 11. -## Run with dev profile +To quickly install Java 11 on a Debian-based Linux distro do: ```bash -./mvnw clean quarkus:dev +sudo apt-get install openjdk-11-jdk -y ``` -## Running tests +Feel free to try more recent Java versions. 
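If several JDKs are installed, a quick sanity check before invoking the Maven wrapper can save some confusion; the commands below are illustrative only, and the OpenJDK path shown is the typical Debian/Ubuntu location, which may differ on your system:

```bash
# Show the JDK that the shell currently resolves to.
java -version

# On Debian/Ubuntu, interactively select the active JDK if needed.
sudo update-alternatives --config java

# Alternatively, point JAVA_HOME at the OpenJDK 11 installation explicitly;
# the Maven wrapper (./mvnw) honours JAVA_HOME when it is set.
export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64
```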
+ +## Compile + +```bash +./mvnw compile +``` + +## Run tests + +```bash +./mvnw test +``` -Run unit and functional tests `./mvnw clean test` +## Run service + +```bash +./mvnw quarkus:dev +``` + +## Clean + +```bash +./mvnw clean +``` ## Deploying on a Kubernetes cluster @@ -30,10 +59,16 @@ To create the K8s manifest file under `target/kubernetes/kubernetes.yml` to be u ```bash ./mvnw clean package -DskipUTs -DskipITs -``` +``` To deploy the application in a K8s cluster run ```bash kubectl apply -f "manifests/automationservice.yaml" ``` + +## Maintainers + +This TeraFlowSDN service is implemented by [UBITECH](https://www.ubitech.eu). + +Feel free to contact Georgios Katsikas (gkatsikas at ubitech dot eu) in case you have questions. diff --git a/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java b/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java index ce977d1ff24788c471ab6bf2c8d6b2c113fb5c63..c4d636b6b4dca7241808ade421f32a77861e4d3f 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java +++ b/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java @@ -78,9 +78,10 @@ public class ContextSubscriber { automationService.deleteDevice(deviceEvent.getDeviceId()); break; case UPDATE: -                // TODO a DeviceConfig object should be part of the DeviceEvent object in order -                // for automationService.updateDevice() to be triggered automatically like -                // addDevice(). +                LOGGER.infof("Received %s for device [%s]", event, deviceId); +                automationService.updateDevice( +                        deviceEvent.getDeviceId(), deviceEvent.getDeviceConfig().orElse(null)); +                break; case UNDEFINED: logWarningMessage(event, deviceId, eventType); break; diff --git a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java index 4359d60db364b4fb28e2623cb0e832a656e803af..816500a57d8431b36f54a95ee714b59b5f984c62 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java +++ b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java @@ -269,6 +269,7 @@ public class Serializer { builder.setDeviceId(deviceId); builder.setEvent(serialize(deviceEvent.getEvent())); +        builder.setDeviceConfig(serialize(deviceEvent.getDeviceConfig().orElse(null))); return builder.build(); } @@ -276,8 +277,9 @@ public class Serializer { public DeviceEvent deserialize(ContextOuterClass.DeviceEvent deviceEvent) { final var deviceId = deserialize(deviceEvent.getDeviceId()); final var event = deserialize(deviceEvent.getEvent()); +        final var deviceConfig = deserialize(deviceEvent.getDeviceConfig()); -        return new DeviceEvent(deviceId, event); +        return new DeviceEvent(deviceId, event, deviceConfig); } public ContextOuterClass.ConfigActionEnum serialize(ConfigActionEnum configAction) { diff --git a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java index efc0be8308fb9a75132cd604a84fd5b4822f3af7..526b9b7b2ba34edc6d538619bdb190a9aefa9d97 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java +++ b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceEvent.java @@ -16,14 +16,23 @@ package eu.teraflow.automation.context.model; +import java.util.Optional; + public class DeviceEvent { private final Event event; private final String deviceId; +    private final Optional<DeviceConfig> deviceConfig; public
DeviceEvent(String deviceId, Event event) { + this(deviceId, event, null); + } + + public DeviceEvent(String deviceId, Event event, DeviceConfig deviceConfig) { this.event = event; this.deviceId = deviceId; + this.deviceConfig = + (deviceConfig == null) ? Optional.empty() : Optional.ofNullable(deviceConfig); } public Event getEvent() { @@ -34,8 +43,14 @@ public class DeviceEvent { return deviceId; } + public Optional<DeviceConfig> getDeviceConfig() { + return deviceConfig; + } + @Override public String toString() { - return String.format("%s[%s, %s]", getClass().getSimpleName(), deviceId, event.toString()); + return String.format( + "%s[%s, %s, %s]", + getClass().getSimpleName(), deviceId, event.toString(), deviceConfig.orElse(null)); } } diff --git a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java index 74cdc0060230f22ae6c022627a4a8be47a9705b5..a02fbbca49319feb93de85efbe759a30a4ed3aa9 100644 --- a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java +++ b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java @@ -392,14 +392,51 @@ class SerializerTest { .setTimestamp(expectedTimestamp) .setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_CREATE) .build(); + + final var expectedConfigRuleCustomA = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyA") + .setResourceValue("resourceValueA") + .build(); + + final var expectedConfigRuleCustomB = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyB") + .setResourceValue("resourceValueB") + .build(); + + final var expectedConfigRuleA = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET) + .setCustom(expectedConfigRuleCustomA) + .build(); + final var expectedConfigRuleB = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE) + .setCustom(expectedConfigRuleCustomB) + .build(); + + final var expectedDeviceConfig = + ContextOuterClass.DeviceConfig.newBuilder() + .addAllConfigRules(List.of(expectedConfigRuleA, expectedConfigRuleB)) + .build(); + final var expectedDeviceEvent = ContextOuterClass.DeviceEvent.newBuilder() .setDeviceId(expectedDeviceId) .setEvent(expectedEvent) + .setDeviceConfig(expectedDeviceConfig) .build(); final var creationEvent = new Event(1, EventTypeEnum.CREATE); - final var deviceEvent = new DeviceEvent("deviceId", creationEvent); + final var configRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA"); + final var configRuleCustomB = new ConfigRuleCustom("resourceKeyB", "resourceValueB"); + final var configRuleTypeA = new ConfigRuleTypeCustom(configRuleCustomA); + final var configRuleTypeB = new ConfigRuleTypeCustom(configRuleCustomB); + final var configRuleA = new ConfigRule(ConfigActionEnum.SET, configRuleTypeA); + final var configRuleB = new ConfigRule(ConfigActionEnum.DELETE, configRuleTypeB); + final var deviceConfig = new DeviceConfig(List.of(configRuleA, configRuleB)); + final var deviceEvent = new DeviceEvent("deviceId", creationEvent, deviceConfig); final var serializedDeviceEvent = serializer.serialize(deviceEvent); assertThat(serializedDeviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent); @@ -412,7 +449,22 @@ class SerializerTest { final var expectedTimestamp = ContextOuterClass.Timestamp.newBuilder().setTimestamp(1).build(); final var creationEvent = new Event(1, 
expectedEventType); - final var expectedDeviceEvent = new DeviceEvent(dummyDeviceId, creationEvent); + + final var expectedConfigRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA"); + final var expectedConfigRuleCustomB = new ConfigRuleCustom("resourceKeyB", "resourceValueB"); + + final var expectedConfigRuleTypeA = new ConfigRuleTypeCustom(expectedConfigRuleCustomA); + final var expectedConfigRuleTypeB = new ConfigRuleTypeCustom(expectedConfigRuleCustomB); + + final var expectedConfigRuleA = new ConfigRule(ConfigActionEnum.SET, expectedConfigRuleTypeA); + final var expectedConfigRuleB = + new ConfigRule(ConfigActionEnum.DELETE, expectedConfigRuleTypeB); + + final var expectedDeviceConfig = + new DeviceConfig(List.of(expectedConfigRuleA, expectedConfigRuleB)); + + final var expectedDeviceEvent = + new DeviceEvent(dummyDeviceId, creationEvent, expectedDeviceConfig); final var deviceUuid = Uuid.newBuilder().setUuid("deviceId"); final var deviceId = DeviceId.newBuilder().setDeviceUuid(deviceUuid).build(); @@ -421,8 +473,38 @@ class SerializerTest { .setTimestamp(expectedTimestamp) .setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_REMOVE) .build(); + + final var configRuleCustomA = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyA") + .setResourceValue("resourceValueA") + .build(); + final var configRuleCustomB = + ContextOuterClass.ConfigRule_Custom.newBuilder() + .setResourceKey("resourceKeyB") + .setResourceValue("resourceValueB") + .build(); + final var configRuleA = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET) + .setCustom(configRuleCustomA) + .build(); + final var configRuleB = + ContextOuterClass.ConfigRule.newBuilder() + .setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE) + .setCustom(configRuleCustomB) + .build(); + final var deviceConfig = + ContextOuterClass.DeviceConfig.newBuilder() + .addAllConfigRules(List.of(configRuleA, configRuleB)) + .build(); + final var serializedDeviceEvent = - ContextOuterClass.DeviceEvent.newBuilder().setDeviceId(deviceId).setEvent(event).build(); + ContextOuterClass.DeviceEvent.newBuilder() + .setDeviceId(deviceId) + .setEvent(event) + .setDeviceConfig(deviceConfig) + .build(); final var deviceEvent = serializer.deserialize(serializedDeviceEvent); assertThat(deviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent); diff --git a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java index 45a64fabb43bab645e97e9d80bc1825242006dce..3c0d7ce36fcdc4e47697ba11a4ceb3d8e8cdea0c 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java +++ b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java @@ -17331,6 +17331,21 @@ public final class ContextOuterClass { * <code>.context.DeviceId device_id = 2;</code> */ context.ContextOuterClass.DeviceIdOrBuilder getDeviceIdOrBuilder(); + + /** + * <code>.context.DeviceConfig device_config = 3;</code> + * @return Whether the deviceConfig field is set. + */ + boolean hasDeviceConfig(); + /** + * <code>.context.DeviceConfig device_config = 3;</code> + * @return The deviceConfig. 
+ */ + context.ContextOuterClass.DeviceConfig getDeviceConfig(); + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder(); } /** * Protobuf type {@code context.DeviceEvent} @@ -17403,6 +17418,19 @@ public final class ContextOuterClass { break; } + case 26: { + context.ContextOuterClass.DeviceConfig.Builder subBuilder = null; + if (deviceConfig_ != null) { + subBuilder = deviceConfig_.toBuilder(); + } + deviceConfig_ = input.readMessage(context.ContextOuterClass.DeviceConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(deviceConfig_); + deviceConfig_ = subBuilder.buildPartial(); + } + + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -17487,6 +17515,32 @@ public final class ContextOuterClass { return getDeviceId(); } + public static final int DEVICE_CONFIG_FIELD_NUMBER = 3; + private context.ContextOuterClass.DeviceConfig deviceConfig_; + /** + * <code>.context.DeviceConfig device_config = 3;</code> + * @return Whether the deviceConfig field is set. + */ + @java.lang.Override + public boolean hasDeviceConfig() { + return deviceConfig_ != null; + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + * @return The deviceConfig. + */ + @java.lang.Override + public context.ContextOuterClass.DeviceConfig getDeviceConfig() { + return deviceConfig_ == null ? context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_; + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + @java.lang.Override + public context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder() { + return getDeviceConfig(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -17507,6 +17561,9 @@ public final class ContextOuterClass { if (deviceId_ != null) { output.writeMessage(2, getDeviceId()); } + if (deviceConfig_ != null) { + output.writeMessage(3, getDeviceConfig()); + } unknownFields.writeTo(output); } @@ -17524,6 +17581,10 @@ public final class ContextOuterClass { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, getDeviceId()); } + if (deviceConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getDeviceConfig()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -17549,6 +17610,11 @@ public final class ContextOuterClass { if (!getDeviceId() .equals(other.getDeviceId())) return false; } + if (hasDeviceConfig() != other.hasDeviceConfig()) return false; + if (hasDeviceConfig()) { + if (!getDeviceConfig() + .equals(other.getDeviceConfig())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -17568,6 +17634,10 @@ public final class ContextOuterClass { hash = (37 * hash) + DEVICE_ID_FIELD_NUMBER; hash = (53 * hash) + getDeviceId().hashCode(); } + if (hasDeviceConfig()) { + hash = (37 * hash) + DEVICE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getDeviceConfig().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -17713,6 +17783,12 @@ public final class ContextOuterClass { deviceId_ = null; deviceIdBuilder_ = null; } + if (deviceConfigBuilder_ == null) { + deviceConfig_ = null; + } else { + deviceConfig_ = null; + deviceConfigBuilder_ = null; + } return this; } @@ -17749,6 +17825,11 @@ public final class ContextOuterClass { } else { result.deviceId_ = 
deviceIdBuilder_.build(); } + if (deviceConfigBuilder_ == null) { + result.deviceConfig_ = deviceConfig_; + } else { + result.deviceConfig_ = deviceConfigBuilder_.build(); + } onBuilt(); return result; } @@ -17803,6 +17884,9 @@ public final class ContextOuterClass { if (other.hasDeviceId()) { mergeDeviceId(other.getDeviceId()); } + if (other.hasDeviceConfig()) { + mergeDeviceConfig(other.getDeviceConfig()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -18069,6 +18153,125 @@ public final class ContextOuterClass { } return deviceIdBuilder_; } + + private context.ContextOuterClass.DeviceConfig deviceConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder> deviceConfigBuilder_; + /** + * <code>.context.DeviceConfig device_config = 3;</code> + * @return Whether the deviceConfig field is set. + */ + public boolean hasDeviceConfig() { + return deviceConfigBuilder_ != null || deviceConfig_ != null; + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + * @return The deviceConfig. + */ + public context.ContextOuterClass.DeviceConfig getDeviceConfig() { + if (deviceConfigBuilder_ == null) { + return deviceConfig_ == null ? context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_; + } else { + return deviceConfigBuilder_.getMessage(); + } + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + public Builder setDeviceConfig(context.ContextOuterClass.DeviceConfig value) { + if (deviceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + deviceConfig_ = value; + onChanged(); + } else { + deviceConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + public Builder setDeviceConfig( + context.ContextOuterClass.DeviceConfig.Builder builderForValue) { + if (deviceConfigBuilder_ == null) { + deviceConfig_ = builderForValue.build(); + onChanged(); + } else { + deviceConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + public Builder mergeDeviceConfig(context.ContextOuterClass.DeviceConfig value) { + if (deviceConfigBuilder_ == null) { + if (deviceConfig_ != null) { + deviceConfig_ = + context.ContextOuterClass.DeviceConfig.newBuilder(deviceConfig_).mergeFrom(value).buildPartial(); + } else { + deviceConfig_ = value; + } + onChanged(); + } else { + deviceConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + public Builder clearDeviceConfig() { + if (deviceConfigBuilder_ == null) { + deviceConfig_ = null; + onChanged(); + } else { + deviceConfig_ = null; + deviceConfigBuilder_ = null; + } + + return this; + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + public context.ContextOuterClass.DeviceConfig.Builder getDeviceConfigBuilder() { + + onChanged(); + return getDeviceConfigFieldBuilder().getBuilder(); + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + public context.ContextOuterClass.DeviceConfigOrBuilder getDeviceConfigOrBuilder() { + if (deviceConfigBuilder_ != null) { + return deviceConfigBuilder_.getMessageOrBuilder(); + } else { + return deviceConfig_ == null ? 
+ context.ContextOuterClass.DeviceConfig.getDefaultInstance() : deviceConfig_; + } + } + /** + * <code>.context.DeviceConfig device_config = 3;</code> + */ + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder> + getDeviceConfigFieldBuilder() { + if (deviceConfigBuilder_ == null) { + deviceConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.DeviceConfig, context.ContextOuterClass.DeviceConfig.Builder, context.ContextOuterClass.DeviceConfigOrBuilder>( + getDeviceConfig(), + getParentForChildren(), + isClean()); + deviceConfig_ = null; + } + return deviceConfigBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -61981,230 +62184,234 @@ public final class ContextOuterClass { "(\0132\023.context.ConfigRule\"5\n\014DeviceIdList\022" + "%\n\ndevice_ids\030\001 \003(\0132\021.context.DeviceId\"." + "\n\nDeviceList\022 \n\007devices\030\001 \003(\0132\017.context." + - "Device\"R\n\013DeviceEvent\022\035\n\005event\030\001 \001(\0132\016.c" + - "ontext.Event\022$\n\tdevice_id\030\002 \001(\0132\021.contex" + - "t.DeviceId\"*\n\006LinkId\022 \n\tlink_uuid\030\001 \001(\0132" + - "\r.context.Uuid\"X\n\004Link\022 \n\007link_id\030\001 \001(\0132" + - "\017.context.LinkId\022.\n\021link_endpoint_ids\030\002 " + - "\003(\0132\023.context.EndPointId\"/\n\nLinkIdList\022!" + - "\n\010link_ids\030\001 \003(\0132\017.context.LinkId\"(\n\010Lin" + - "kList\022\034\n\005links\030\001 \003(\0132\r.context.Link\"L\n\tL" + - "inkEvent\022\035\n\005event\030\001 \001(\0132\016.context.Event\022" + - " \n\007link_id\030\002 \001(\0132\017.context.LinkId\"X\n\tSer" + - "viceId\022&\n\ncontext_id\030\001 \001(\0132\022.context.Con" + - "textId\022#\n\014service_uuid\030\002 \001(\0132\r.context.U" + - "uid\"\315\002\n\007Service\022&\n\nservice_id\030\001 \001(\0132\022.co" + - "ntext.ServiceId\022.\n\014service_type\030\002 \001(\0162\030." 
+ - "context.ServiceTypeEnum\0221\n\024service_endpo" + - "int_ids\030\003 \003(\0132\023.context.EndPointId\0220\n\023se" + - "rvice_constraints\030\004 \003(\0132\023.context.Constr" + - "aint\022.\n\016service_status\030\005 \001(\0132\026.context.S" + - "erviceStatus\022.\n\016service_config\030\006 \001(\0132\026.c" + - "ontext.ServiceConfig\022%\n\ttimestamp\030\007 \001(\0132" + - "\022.context.Timestamp\"C\n\rServiceStatus\0222\n\016" + - "service_status\030\001 \001(\0162\032.context.ServiceSt" + - "atusEnum\":\n\rServiceConfig\022)\n\014config_rule" + - "s\030\001 \003(\0132\023.context.ConfigRule\"8\n\rServiceI" + - "dList\022\'\n\013service_ids\030\001 \003(\0132\022.context.Ser" + - "viceId\"1\n\013ServiceList\022\"\n\010services\030\001 \003(\0132" + - "\020.context.Service\"U\n\014ServiceEvent\022\035\n\005eve" + - "nt\030\001 \001(\0132\016.context.Event\022&\n\nservice_id\030\002" + - " \001(\0132\022.context.ServiceId\"T\n\007SliceId\022&\n\nc" + - "ontext_id\030\001 \001(\0132\022.context.ContextId\022!\n\ns" + - "lice_uuid\030\002 \001(\0132\r.context.Uuid\"\222\003\n\005Slice" + - "\022\"\n\010slice_id\030\001 \001(\0132\020.context.SliceId\022/\n\022" + - "slice_endpoint_ids\030\002 \003(\0132\023.context.EndPo" + - "intId\022.\n\021slice_constraints\030\003 \003(\0132\023.conte" + - "xt.Constraint\022-\n\021slice_service_ids\030\004 \003(\013" + - "2\022.context.ServiceId\022,\n\022slice_subslice_i" + - "ds\030\005 \003(\0132\020.context.SliceId\022*\n\014slice_stat" + - "us\030\006 \001(\0132\024.context.SliceStatus\022*\n\014slice_" + - "config\030\007 \001(\0132\024.context.SliceConfig\022(\n\013sl" + - "ice_owner\030\010 \001(\0132\023.context.SliceOwner\022%\n\t" + - "timestamp\030\t \001(\0132\022.context.Timestamp\"E\n\nS" + - "liceOwner\022!\n\nowner_uuid\030\001 \001(\0132\r.context." 
+ - "Uuid\022\024\n\014owner_string\030\002 \001(\t\"=\n\013SliceStatu" + - "s\022.\n\014slice_status\030\001 \001(\0162\030.context.SliceS" + - "tatusEnum\"8\n\013SliceConfig\022)\n\014config_rules" + - "\030\001 \003(\0132\023.context.ConfigRule\"2\n\013SliceIdLi" + - "st\022#\n\tslice_ids\030\001 \003(\0132\020.context.SliceId\"" + - "+\n\tSliceList\022\036\n\006slices\030\001 \003(\0132\016.context.S" + - "lice\"O\n\nSliceEvent\022\035\n\005event\030\001 \001(\0132\016.cont" + - "ext.Event\022\"\n\010slice_id\030\002 \001(\0132\020.context.Sl" + - "iceId\"6\n\014ConnectionId\022&\n\017connection_uuid" + - "\030\001 \001(\0132\r.context.Uuid\"2\n\025ConnectionSetti" + - "ngs_L0\022\031\n\021lsp_symbolic_name\030\001 \001(\t\"\236\001\n\025Co" + - "nnectionSettings_L2\022\027\n\017src_mac_address\030\001" + - " \001(\t\022\027\n\017dst_mac_address\030\002 \001(\t\022\022\n\nether_t" + - "ype\030\003 \001(\r\022\017\n\007vlan_id\030\004 \001(\r\022\022\n\nmpls_label" + - "\030\005 \001(\r\022\032\n\022mpls_traffic_class\030\006 \001(\r\"t\n\025Co" + - "nnectionSettings_L3\022\026\n\016src_ip_address\030\001 " + - "\001(\t\022\026\n\016dst_ip_address\030\002 \001(\t\022\014\n\004dscp\030\003 \001(" + - "\r\022\020\n\010protocol\030\004 \001(\r\022\013\n\003ttl\030\005 \001(\r\"[\n\025Conn" + - "ectionSettings_L4\022\020\n\010src_port\030\001 \001(\r\022\020\n\010d" + - "st_port\030\002 \001(\r\022\021\n\ttcp_flags\030\003 \001(\r\022\013\n\003ttl\030" + - "\004 \001(\r\"\304\001\n\022ConnectionSettings\022*\n\002l0\030\001 \001(\013" + - "2\036.context.ConnectionSettings_L0\022*\n\002l2\030\002" + - " \001(\0132\036.context.ConnectionSettings_L2\022*\n\002" + - "l3\030\003 \001(\0132\036.context.ConnectionSettings_L3" + - "\022*\n\002l4\030\004 \001(\0132\036.context.ConnectionSetting" + - "s_L4\"\363\001\n\nConnection\022,\n\rconnection_id\030\001 \001" + - "(\0132\025.context.ConnectionId\022&\n\nservice_id\030" + - "\002 \001(\0132\022.context.ServiceId\0223\n\026path_hops_e" + - "ndpoint_ids\030\003 \003(\0132\023.context.EndPointId\022+" + - "\n\017sub_service_ids\030\004 \003(\0132\022.context.Servic" + - "eId\022-\n\010settings\030\005 \001(\0132\033.context.Connecti" + - "onSettings\"A\n\020ConnectionIdList\022-\n\016connec" + - "tion_ids\030\001 \003(\0132\025.context.ConnectionId\":\n" + - "\016ConnectionList\022(\n\013connections\030\001 \003(\0132\023.c" + - "ontext.Connection\"^\n\017ConnectionEvent\022\035\n\005" + - "event\030\001 \001(\0132\016.context.Event\022,\n\rconnectio" + - "n_id\030\002 \001(\0132\025.context.ConnectionId\"\202\001\n\nEn" + - "dPointId\022(\n\013topology_id\030\001 \001(\0132\023.context." + - "TopologyId\022$\n\tdevice_id\030\002 \001(\0132\021.context." 
+ - "DeviceId\022$\n\rendpoint_uuid\030\003 \001(\0132\r.contex" + - "t.Uuid\"\264\001\n\010EndPoint\022(\n\013endpoint_id\030\001 \001(\013" + - "2\023.context.EndPointId\022\025\n\rendpoint_type\030\002" + - " \001(\t\0229\n\020kpi_sample_types\030\003 \003(\0162\037.kpi_sam" + - "ple_types.KpiSampleType\022,\n\021endpoint_loca" + - "tion\030\004 \001(\0132\021.context.Location\"A\n\021ConfigR" + - "ule_Custom\022\024\n\014resource_key\030\001 \001(\t\022\026\n\016reso" + - "urce_value\030\002 \001(\t\"]\n\016ConfigRule_ACL\022(\n\013en" + - "dpoint_id\030\001 \001(\0132\023.context.EndPointId\022!\n\010" + - "rule_set\030\002 \001(\0132\017.acl.AclRuleSet\"\234\001\n\nConf" + - "igRule\022)\n\006action\030\001 \001(\0162\031.context.ConfigA" + - "ctionEnum\022,\n\006custom\030\002 \001(\0132\032.context.Conf" + - "igRule_CustomH\000\022&\n\003acl\030\003 \001(\0132\027.context.C" + - "onfigRule_ACLH\000B\r\n\013config_rule\"F\n\021Constr" + - "aint_Custom\022\027\n\017constraint_type\030\001 \001(\t\022\030\n\020" + - "constraint_value\030\002 \001(\t\"E\n\023Constraint_Sch" + - "edule\022\027\n\017start_timestamp\030\001 \001(\002\022\025\n\rdurati" + - "on_days\030\002 \001(\002\"3\n\014GPS_Position\022\020\n\010latitud" + - "e\030\001 \001(\002\022\021\n\tlongitude\030\002 \001(\002\"W\n\010Location\022\020" + - "\n\006region\030\001 \001(\tH\000\022-\n\014gps_position\030\002 \001(\0132\025" + - ".context.GPS_PositionH\000B\n\n\010location\"l\n\033C" + - "onstraint_EndPointLocation\022(\n\013endpoint_i" + - "d\030\001 \001(\0132\023.context.EndPointId\022#\n\010location" + - "\030\002 \001(\0132\021.context.Location\"Y\n\033Constraint_" + - "EndPointPriority\022(\n\013endpoint_id\030\001 \001(\0132\023." + - "context.EndPointId\022\020\n\010priority\030\002 \001(\r\"0\n\026" + - "Constraint_SLA_Latency\022\026\n\016e2e_latency_ms" + - "\030\001 \001(\002\"0\n\027Constraint_SLA_Capacity\022\025\n\rcap" + - "acity_gbps\030\001 \001(\002\"M\n\033Constraint_SLA_Avail" + - "ability\022\032\n\022num_disjoint_paths\030\001 \001(\r\022\022\n\na" + - "ll_active\030\002 \001(\010\"V\n\036Constraint_SLA_Isolat" + - "ion_level\0224\n\017isolation_level\030\001 \003(\0162\033.con" + - "text.IsolationLevelEnum\"\366\003\n\nConstraint\022," + - "\n\006custom\030\001 \001(\0132\032.context.Constraint_Cust" + - "omH\000\0220\n\010schedule\030\002 \001(\0132\034.context.Constra" + - "int_ScheduleH\000\022A\n\021endpoint_location\030\003 \001(" + - "\0132$.context.Constraint_EndPointLocationH" + - "\000\022A\n\021endpoint_priority\030\004 \001(\0132$.context.C" + - "onstraint_EndPointPriorityH\000\0228\n\014sla_capa" + - "city\030\005 \001(\0132 .context.Constraint_SLA_Capa" + - "cityH\000\0226\n\013sla_latency\030\006 \001(\0132\037.context.Co" + - "nstraint_SLA_LatencyH\000\022@\n\020sla_availabili" + - "ty\030\007 \001(\0132$.context.Constraint_SLA_Availa" + - "bilityH\000\022@\n\rsla_isolation\030\010 \001(\0132\'.contex" + - "t.Constraint_SLA_Isolation_levelH\000B\014\n\nco" + - "nstraint\"^\n\022TeraFlowController\022&\n\ncontex" + - "t_id\030\001 \001(\0132\022.context.ContextId\022\022\n\nip_add" + - "ress\030\002 \001(\t\022\014\n\004port\030\003 \001(\r\"U\n\024Authenticati" + - "onResult\022&\n\ncontext_id\030\001 \001(\0132\022.context.C" + - "ontextId\022\025\n\rauthenticated\030\002 \001(\010*j\n\rEvent" + - "TypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000\022\024\n\020EVE" + - "NTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDATE\020\002\022\024\n" + - 
"\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDriverEnum" + - "\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027DEVICEDR" + - "IVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER_TRANSP" + - "ORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n\"DEVICE" + - "DRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n\027DEVICE" + - "DRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOperationa" + - "lStatusEnum\022%\n!DEVICEOPERATIONALSTATUS_U" + - "NDEFINED\020\000\022$\n DEVICEOPERATIONALSTATUS_DI" + - "SABLED\020\001\022#\n\037DEVICEOPERATIONALSTATUS_ENAB" + - "LED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERVICETYPE" + - "_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022\024\n\020SERV" + - "ICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAPI_CONNE" + - "CTIVITY_SERVICE\020\003*\250\001\n\021ServiceStatusEnum\022" + - "\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025SERVICES" + - "TATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_ACTIVE\020" + - "\002\022!\n\035SERVICESTATUS_PENDING_REMOVAL\020\003\022\036\n\032" + - "SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017SliceSt" + - "atusEnum\022\031\n\025SLICESTATUS_UNDEFINED\020\000\022\027\n\023S" + - "LICESTATUS_PLANNED\020\001\022\024\n\020SLICESTATUS_INIT" + - "\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLICESTATU" + - "S_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIOLATED\020\005" + - "*]\n\020ConfigActionEnum\022\032\n\026CONFIGACTION_UND" + - "EFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n\023CONFIG" + - "ACTION_DELETE\020\002*\203\002\n\022IsolationLevelEnum\022\020" + - "\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOLATION\020\001" + - "\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCESS_ISOLA" + - "TION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATION\020\004\022\036\n" + - "\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032VIRTUAL" + - "_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_FUNCTIO" + - "NS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATION\020\0102\357\022" + - "\n\016ContextService\022:\n\016ListContextIds\022\016.con" + - "text.Empty\032\026.context.ContextIdList\"\000\0226\n\014" + - "ListContexts\022\016.context.Empty\032\024.context.C" + - "ontextList\"\000\0224\n\nGetContext\022\022.context.Con" + - "textId\032\020.context.Context\"\000\0224\n\nSetContext" + - "\022\020.context.Context\032\022.context.ContextId\"\000" + - "\0225\n\rRemoveContext\022\022.context.ContextId\032\016." + - "context.Empty\"\000\022=\n\020GetContextEvents\022\016.co" + - "ntext.Empty\032\025.context.ContextEvent\"\0000\001\022@" + - "\n\017ListTopologyIds\022\022.context.ContextId\032\027." 
+ - "context.TopologyIdList\"\000\022=\n\016ListTopologi" + - "es\022\022.context.ContextId\032\025.context.Topolog" + - "yList\"\000\0227\n\013GetTopology\022\023.context.Topolog" + - "yId\032\021.context.Topology\"\000\0227\n\013SetTopology\022" + - "\021.context.Topology\032\023.context.TopologyId\"" + - "\000\0227\n\016RemoveTopology\022\023.context.TopologyId" + - "\032\016.context.Empty\"\000\022?\n\021GetTopologyEvents\022" + - "\016.context.Empty\032\026.context.TopologyEvent\"" + - "\0000\001\0228\n\rListDeviceIds\022\016.context.Empty\032\025.c" + - "ontext.DeviceIdList\"\000\0224\n\013ListDevices\022\016.c" + - "ontext.Empty\032\023.context.DeviceList\"\000\0221\n\tG" + - "etDevice\022\021.context.DeviceId\032\017.context.De" + - "vice\"\000\0221\n\tSetDevice\022\017.context.Device\032\021.c" + - "ontext.DeviceId\"\000\0223\n\014RemoveDevice\022\021.cont" + - "ext.DeviceId\032\016.context.Empty\"\000\022;\n\017GetDev" + - "iceEvents\022\016.context.Empty\032\024.context.Devi" + - "ceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.context.Emp" + - "ty\032\023.context.LinkIdList\"\000\0220\n\tListLinks\022\016" + - ".context.Empty\032\021.context.LinkList\"\000\022+\n\007G" + - "etLink\022\017.context.LinkId\032\r.context.Link\"\000" + - "\022+\n\007SetLink\022\r.context.Link\032\017.context.Lin" + - "kId\"\000\022/\n\nRemoveLink\022\017.context.LinkId\032\016.c" + - "ontext.Empty\"\000\0227\n\rGetLinkEvents\022\016.contex" + - "t.Empty\032\022.context.LinkEvent\"\0000\001\022>\n\016ListS" + - "erviceIds\022\022.context.ContextId\032\026.context." + - "ServiceIdList\"\000\022:\n\014ListServices\022\022.contex" + - "t.ContextId\032\024.context.ServiceList\"\000\0224\n\nG" + - "etService\022\022.context.ServiceId\032\020.context." + - "Service\"\000\0224\n\nSetService\022\020.context.Servic" + - "e\032\022.context.ServiceId\"\000\0225\n\rRemoveService" + - "\022\022.context.ServiceId\032\016.context.Empty\"\000\022=" + - "\n\020GetServiceEvents\022\016.context.Empty\032\025.con" + - "text.ServiceEvent\"\0000\001\022:\n\014ListSliceIds\022\022." + - "context.ContextId\032\024.context.SliceIdList\"" + - "\000\0226\n\nListSlices\022\022.context.ContextId\032\022.co" + - "ntext.SliceList\"\000\022.\n\010GetSlice\022\020.context." + - "SliceId\032\016.context.Slice\"\000\022.\n\010SetSlice\022\016." + - "context.Slice\032\020.context.SliceId\"\000\0221\n\013Rem" + - "oveSlice\022\020.context.SliceId\032\016.context.Emp" + - "ty\"\000\0229\n\016GetSliceEvents\022\016.context.Empty\032\023" + - ".context.SliceEvent\"\0000\001\022D\n\021ListConnectio" + - "nIds\022\022.context.ServiceId\032\031.context.Conne" + - "ctionIdList\"\000\022@\n\017ListConnections\022\022.conte" + - "xt.ServiceId\032\027.context.ConnectionList\"\000\022" + - "=\n\rGetConnection\022\025.context.ConnectionId\032" + - "\023.context.Connection\"\000\022=\n\rSetConnection\022" + - "\023.context.Connection\032\025.context.Connectio" + - "nId\"\000\022;\n\020RemoveConnection\022\025.context.Conn" + - "ectionId\032\016.context.Empty\"\000\022C\n\023GetConnect" + - "ionEvents\022\016.context.Empty\032\030.context.Conn" + - "ectionEvent\"\0000\001b\006proto3" + "Device\"\200\001\n\013DeviceEvent\022\035\n\005event\030\001 \001(\0132\016." 
+ + "context.Event\022$\n\tdevice_id\030\002 \001(\0132\021.conte" + + "xt.DeviceId\022,\n\rdevice_config\030\003 \001(\0132\025.con" + + "text.DeviceConfig\"*\n\006LinkId\022 \n\tlink_uuid" + + "\030\001 \001(\0132\r.context.Uuid\"X\n\004Link\022 \n\007link_id" + + "\030\001 \001(\0132\017.context.LinkId\022.\n\021link_endpoint" + + "_ids\030\002 \003(\0132\023.context.EndPointId\"/\n\nLinkI" + + "dList\022!\n\010link_ids\030\001 \003(\0132\017.context.LinkId" + + "\"(\n\010LinkList\022\034\n\005links\030\001 \003(\0132\r.context.Li" + + "nk\"L\n\tLinkEvent\022\035\n\005event\030\001 \001(\0132\016.context" + + ".Event\022 \n\007link_id\030\002 \001(\0132\017.context.LinkId" + + "\"X\n\tServiceId\022&\n\ncontext_id\030\001 \001(\0132\022.cont" + + "ext.ContextId\022#\n\014service_uuid\030\002 \001(\0132\r.co" + + "ntext.Uuid\"\315\002\n\007Service\022&\n\nservice_id\030\001 \001" + + "(\0132\022.context.ServiceId\022.\n\014service_type\030\002" + + " \001(\0162\030.context.ServiceTypeEnum\0221\n\024servic" + + "e_endpoint_ids\030\003 \003(\0132\023.context.EndPointI" + + "d\0220\n\023service_constraints\030\004 \003(\0132\023.context" + + ".Constraint\022.\n\016service_status\030\005 \001(\0132\026.co" + + "ntext.ServiceStatus\022.\n\016service_config\030\006 " + + "\001(\0132\026.context.ServiceConfig\022%\n\ttimestamp" + + "\030\007 \001(\0132\022.context.Timestamp\"C\n\rServiceSta" + + "tus\0222\n\016service_status\030\001 \001(\0162\032.context.Se" + + "rviceStatusEnum\":\n\rServiceConfig\022)\n\014conf" + + "ig_rules\030\001 \003(\0132\023.context.ConfigRule\"8\n\rS" + + "erviceIdList\022\'\n\013service_ids\030\001 \003(\0132\022.cont" + + "ext.ServiceId\"1\n\013ServiceList\022\"\n\010services" + + "\030\001 \003(\0132\020.context.Service\"U\n\014ServiceEvent" + + "\022\035\n\005event\030\001 \001(\0132\016.context.Event\022&\n\nservi" + + "ce_id\030\002 \001(\0132\022.context.ServiceId\"T\n\007Slice" + + "Id\022&\n\ncontext_id\030\001 \001(\0132\022.context.Context" + + "Id\022!\n\nslice_uuid\030\002 \001(\0132\r.context.Uuid\"\222\003" + + "\n\005Slice\022\"\n\010slice_id\030\001 \001(\0132\020.context.Slic" + + "eId\022/\n\022slice_endpoint_ids\030\002 \003(\0132\023.contex" + + "t.EndPointId\022.\n\021slice_constraints\030\003 \003(\0132" + + "\023.context.Constraint\022-\n\021slice_service_id" + + "s\030\004 \003(\0132\022.context.ServiceId\022,\n\022slice_sub" + + "slice_ids\030\005 \003(\0132\020.context.SliceId\022*\n\014sli" + + "ce_status\030\006 \001(\0132\024.context.SliceStatus\022*\n" + + "\014slice_config\030\007 \001(\0132\024.context.SliceConfi" + + "g\022(\n\013slice_owner\030\010 \001(\0132\023.context.SliceOw" + + "ner\022%\n\ttimestamp\030\t \001(\0132\022.context.Timesta" + + "mp\"E\n\nSliceOwner\022!\n\nowner_uuid\030\001 \001(\0132\r.c" + + "ontext.Uuid\022\024\n\014owner_string\030\002 \001(\t\"=\n\013Sli" + + "ceStatus\022.\n\014slice_status\030\001 \001(\0162\030.context" + + ".SliceStatusEnum\"8\n\013SliceConfig\022)\n\014confi" + + "g_rules\030\001 \003(\0132\023.context.ConfigRule\"2\n\013Sl" + + "iceIdList\022#\n\tslice_ids\030\001 \003(\0132\020.context.S" + + "liceId\"+\n\tSliceList\022\036\n\006slices\030\001 \003(\0132\016.co" + + "ntext.Slice\"O\n\nSliceEvent\022\035\n\005event\030\001 \001(\013" + + "2\016.context.Event\022\"\n\010slice_id\030\002 \001(\0132\020.con" + + "text.SliceId\"6\n\014ConnectionId\022&\n\017connecti" + + "on_uuid\030\001 \001(\0132\r.context.Uuid\"2\n\025Connecti" + + "onSettings_L0\022\031\n\021lsp_symbolic_name\030\001 
\001(\t" + + "\"\236\001\n\025ConnectionSettings_L2\022\027\n\017src_mac_ad" + + "dress\030\001 \001(\t\022\027\n\017dst_mac_address\030\002 \001(\t\022\022\n\n" + + "ether_type\030\003 \001(\r\022\017\n\007vlan_id\030\004 \001(\r\022\022\n\nmpl" + + "s_label\030\005 \001(\r\022\032\n\022mpls_traffic_class\030\006 \001(" + + "\r\"t\n\025ConnectionSettings_L3\022\026\n\016src_ip_add" + + "ress\030\001 \001(\t\022\026\n\016dst_ip_address\030\002 \001(\t\022\014\n\004ds" + + "cp\030\003 \001(\r\022\020\n\010protocol\030\004 \001(\r\022\013\n\003ttl\030\005 \001(\r\"" + + "[\n\025ConnectionSettings_L4\022\020\n\010src_port\030\001 \001" + + "(\r\022\020\n\010dst_port\030\002 \001(\r\022\021\n\ttcp_flags\030\003 \001(\r\022" + + "\013\n\003ttl\030\004 \001(\r\"\304\001\n\022ConnectionSettings\022*\n\002l" + + "0\030\001 \001(\0132\036.context.ConnectionSettings_L0\022" + + "*\n\002l2\030\002 \001(\0132\036.context.ConnectionSettings" + + "_L2\022*\n\002l3\030\003 \001(\0132\036.context.ConnectionSett" + + "ings_L3\022*\n\002l4\030\004 \001(\0132\036.context.Connection" + + "Settings_L4\"\363\001\n\nConnection\022,\n\rconnection" + + "_id\030\001 \001(\0132\025.context.ConnectionId\022&\n\nserv" + + "ice_id\030\002 \001(\0132\022.context.ServiceId\0223\n\026path" + + "_hops_endpoint_ids\030\003 \003(\0132\023.context.EndPo" + + "intId\022+\n\017sub_service_ids\030\004 \003(\0132\022.context" + + ".ServiceId\022-\n\010settings\030\005 \001(\0132\033.context.C" + + "onnectionSettings\"A\n\020ConnectionIdList\022-\n" + + "\016connection_ids\030\001 \003(\0132\025.context.Connecti" + + "onId\":\n\016ConnectionList\022(\n\013connections\030\001 " + + "\003(\0132\023.context.Connection\"^\n\017ConnectionEv" + + "ent\022\035\n\005event\030\001 \001(\0132\016.context.Event\022,\n\rco" + + "nnection_id\030\002 \001(\0132\025.context.ConnectionId" + + "\"\202\001\n\nEndPointId\022(\n\013topology_id\030\001 \001(\0132\023.c" + + "ontext.TopologyId\022$\n\tdevice_id\030\002 \001(\0132\021.c" + + "ontext.DeviceId\022$\n\rendpoint_uuid\030\003 \001(\0132\r" + + ".context.Uuid\"\264\001\n\010EndPoint\022(\n\013endpoint_i" + + "d\030\001 \001(\0132\023.context.EndPointId\022\025\n\rendpoint" + + "_type\030\002 \001(\t\0229\n\020kpi_sample_types\030\003 \003(\0162\037." + + "kpi_sample_types.KpiSampleType\022,\n\021endpoi" + + "nt_location\030\004 \001(\0132\021.context.Location\"A\n\021" + + "ConfigRule_Custom\022\024\n\014resource_key\030\001 \001(\t\022" + + "\026\n\016resource_value\030\002 \001(\t\"]\n\016ConfigRule_AC" + + "L\022(\n\013endpoint_id\030\001 \001(\0132\023.context.EndPoin" + + "tId\022!\n\010rule_set\030\002 \001(\0132\017.acl.AclRuleSet\"\234" + + "\001\n\nConfigRule\022)\n\006action\030\001 \001(\0162\031.context." 
+ + "ConfigActionEnum\022,\n\006custom\030\002 \001(\0132\032.conte" + + "xt.ConfigRule_CustomH\000\022&\n\003acl\030\003 \001(\0132\027.co" + + "ntext.ConfigRule_ACLH\000B\r\n\013config_rule\"F\n" + + "\021Constraint_Custom\022\027\n\017constraint_type\030\001 " + + "\001(\t\022\030\n\020constraint_value\030\002 \001(\t\"E\n\023Constra" + + "int_Schedule\022\027\n\017start_timestamp\030\001 \001(\002\022\025\n" + + "\rduration_days\030\002 \001(\002\"3\n\014GPS_Position\022\020\n\010" + + "latitude\030\001 \001(\002\022\021\n\tlongitude\030\002 \001(\002\"W\n\010Loc" + + "ation\022\020\n\006region\030\001 \001(\tH\000\022-\n\014gps_position\030" + + "\002 \001(\0132\025.context.GPS_PositionH\000B\n\n\010locati" + + "on\"l\n\033Constraint_EndPointLocation\022(\n\013end" + + "point_id\030\001 \001(\0132\023.context.EndPointId\022#\n\010l" + + "ocation\030\002 \001(\0132\021.context.Location\"Y\n\033Cons" + + "traint_EndPointPriority\022(\n\013endpoint_id\030\001" + + " \001(\0132\023.context.EndPointId\022\020\n\010priority\030\002 " + + "\001(\r\"0\n\026Constraint_SLA_Latency\022\026\n\016e2e_lat" + + "ency_ms\030\001 \001(\002\"0\n\027Constraint_SLA_Capacity" + + "\022\025\n\rcapacity_gbps\030\001 \001(\002\"M\n\033Constraint_SL" + + "A_Availability\022\032\n\022num_disjoint_paths\030\001 \001" + + "(\r\022\022\n\nall_active\030\002 \001(\010\"V\n\036Constraint_SLA" + + "_Isolation_level\0224\n\017isolation_level\030\001 \003(" + + "\0162\033.context.IsolationLevelEnum\"\366\003\n\nConst" + + "raint\022,\n\006custom\030\001 \001(\0132\032.context.Constrai" + + "nt_CustomH\000\0220\n\010schedule\030\002 \001(\0132\034.context." + + "Constraint_ScheduleH\000\022A\n\021endpoint_locati" + + "on\030\003 \001(\0132$.context.Constraint_EndPointLo" + + "cationH\000\022A\n\021endpoint_priority\030\004 \001(\0132$.co" + + "ntext.Constraint_EndPointPriorityH\000\0228\n\014s" + + "la_capacity\030\005 \001(\0132 .context.Constraint_S" + + "LA_CapacityH\000\0226\n\013sla_latency\030\006 \001(\0132\037.con" + + "text.Constraint_SLA_LatencyH\000\022@\n\020sla_ava" + + "ilability\030\007 \001(\0132$.context.Constraint_SLA" + + "_AvailabilityH\000\022@\n\rsla_isolation\030\010 \001(\0132\'" + + ".context.Constraint_SLA_Isolation_levelH" + + "\000B\014\n\nconstraint\"^\n\022TeraFlowController\022&\n" + + "\ncontext_id\030\001 \001(\0132\022.context.ContextId\022\022\n" + + "\nip_address\030\002 \001(\t\022\014\n\004port\030\003 \001(\r\"U\n\024Authe" + + "nticationResult\022&\n\ncontext_id\030\001 \001(\0132\022.co" + + "ntext.ContextId\022\025\n\rauthenticated\030\002 \001(\010*j" + + "\n\rEventTypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000" + + "\022\024\n\020EVENTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDA" + + "TE\020\002\022\024\n\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDri" + + "verEnum\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027D" + + "EVICEDRIVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER" + + "_TRANSPORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n" + + "\"DEVICEDRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n" + + "\027DEVICEDRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOpe" + + "rationalStatusEnum\022%\n!DEVICEOPERATIONALS" + + "TATUS_UNDEFINED\020\000\022$\n DEVICEOPERATIONALST" + + "ATUS_DISABLED\020\001\022#\n\037DEVICEOPERATIONALSTAT" + + "US_ENABLED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERV" + + "ICETYPE_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022" + + "\024\n\020SERVICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAP" + + 
"I_CONNECTIVITY_SERVICE\020\003*\250\001\n\021ServiceStat" + + "usEnum\022\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025S" + + "ERVICESTATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_" + + "ACTIVE\020\002\022!\n\035SERVICESTATUS_PENDING_REMOVA" + + "L\020\003\022\036\n\032SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017" + + "SliceStatusEnum\022\031\n\025SLICESTATUS_UNDEFINED" + + "\020\000\022\027\n\023SLICESTATUS_PLANNED\020\001\022\024\n\020SLICESTAT" + + "US_INIT\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLI" + + "CESTATUS_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIO" + + "LATED\020\005*]\n\020ConfigActionEnum\022\032\n\026CONFIGACT" + + "ION_UNDEFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n" + + "\023CONFIGACTION_DELETE\020\002*\203\002\n\022IsolationLeve" + + "lEnum\022\020\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOL" + + "ATION\020\001\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCES" + + "S_ISOLATION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATI" + + "ON\020\004\022\036\n\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032" + + "VIRTUAL_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_" + + "FUNCTIONS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATI" + + "ON\020\0102\331\023\n\016ContextService\022:\n\016ListContextId" + + "s\022\016.context.Empty\032\026.context.ContextIdLis" + + "t\"\000\0226\n\014ListContexts\022\016.context.Empty\032\024.co" + + "ntext.ContextList\"\000\0224\n\nGetContext\022\022.cont" + + "ext.ContextId\032\020.context.Context\"\000\0224\n\nSet" + + "Context\022\020.context.Context\032\022.context.Cont" + + "extId\"\000\0225\n\rRemoveContext\022\022.context.Conte" + + "xtId\032\016.context.Empty\"\000\022=\n\020GetContextEven" + + "ts\022\016.context.Empty\032\025.context.ContextEven" + + "t\"\0000\001\022@\n\017ListTopologyIds\022\022.context.Conte" + + "xtId\032\027.context.TopologyIdList\"\000\022=\n\016ListT" + + "opologies\022\022.context.ContextId\032\025.context." + + "TopologyList\"\000\0227\n\013GetTopology\022\023.context." 
+ + "TopologyId\032\021.context.Topology\"\000\0227\n\013SetTo" + + "pology\022\021.context.Topology\032\023.context.Topo" + + "logyId\"\000\0227\n\016RemoveTopology\022\023.context.Top" + + "ologyId\032\016.context.Empty\"\000\022?\n\021GetTopology" + + "Events\022\016.context.Empty\032\026.context.Topolog" + + "yEvent\"\0000\001\0228\n\rListDeviceIds\022\016.context.Em" + + "pty\032\025.context.DeviceIdList\"\000\0224\n\013ListDevi" + + "ces\022\016.context.Empty\032\023.context.DeviceList" + + "\"\000\0221\n\tGetDevice\022\021.context.DeviceId\032\017.con" + + "text.Device\"\000\0221\n\tSetDevice\022\017.context.Dev" + + "ice\032\021.context.DeviceId\"\000\0223\n\014RemoveDevice" + + "\022\021.context.DeviceId\032\016.context.Empty\"\000\022;\n" + + "\017GetDeviceEvents\022\016.context.Empty\032\024.conte" + + "xt.DeviceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.cont" + + "ext.Empty\032\023.context.LinkIdList\"\000\0220\n\tList" + + "Links\022\016.context.Empty\032\021.context.LinkList" + + "\"\000\022+\n\007GetLink\022\017.context.LinkId\032\r.context" + + ".Link\"\000\022+\n\007SetLink\022\r.context.Link\032\017.cont" + + "ext.LinkId\"\000\022/\n\nRemoveLink\022\017.context.Lin" + + "kId\032\016.context.Empty\"\000\0227\n\rGetLinkEvents\022\016" + + ".context.Empty\032\022.context.LinkEvent\"\0000\001\022>" + + "\n\016ListServiceIds\022\022.context.ContextId\032\026.c" + + "ontext.ServiceIdList\"\000\022:\n\014ListServices\022\022" + + ".context.ContextId\032\024.context.ServiceList" + + "\"\000\0224\n\nGetService\022\022.context.ServiceId\032\020.c" + + "ontext.Service\"\000\0224\n\nSetService\022\020.context" + + ".Service\032\022.context.ServiceId\"\000\0226\n\014UnsetS" + + "ervice\022\020.context.Service\032\022.context.Servi" + + "ceId\"\000\0225\n\rRemoveService\022\022.context.Servic" + + "eId\032\016.context.Empty\"\000\022=\n\020GetServiceEvent" + + "s\022\016.context.Empty\032\025.context.ServiceEvent" + + "\"\0000\001\022:\n\014ListSliceIds\022\022.context.ContextId" + + "\032\024.context.SliceIdList\"\000\0226\n\nListSlices\022\022" + + ".context.ContextId\032\022.context.SliceList\"\000" + + "\022.\n\010GetSlice\022\020.context.SliceId\032\016.context" + + ".Slice\"\000\022.\n\010SetSlice\022\016.context.Slice\032\020.c" + + "ontext.SliceId\"\000\0220\n\nUnsetSlice\022\016.context" + + ".Slice\032\020.context.SliceId\"\000\0221\n\013RemoveSlic" + + "e\022\020.context.SliceId\032\016.context.Empty\"\000\0229\n" + + "\016GetSliceEvents\022\016.context.Empty\032\023.contex" + + "t.SliceEvent\"\0000\001\022D\n\021ListConnectionIds\022\022." 
+ + "context.ServiceId\032\031.context.ConnectionId" + + "List\"\000\022@\n\017ListConnections\022\022.context.Serv" + + "iceId\032\027.context.ConnectionList\"\000\022=\n\rGetC" + + "onnection\022\025.context.ConnectionId\032\023.conte" + + "xt.Connection\"\000\022=\n\rSetConnection\022\023.conte" + + "xt.Connection\032\025.context.ConnectionId\"\000\022;" + + "\n\020RemoveConnection\022\025.context.ConnectionI" + + "d\032\016.context.Empty\"\000\022C\n\023GetConnectionEven" + + "ts\022\016.context.Empty\032\030.context.ConnectionE" + + "vent\"\0000\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -62331,7 +62538,7 @@ public final class ContextOuterClass { internal_static_context_DeviceEvent_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_context_DeviceEvent_descriptor, - new java.lang.String[] { "Event", "DeviceId", }); + new java.lang.String[] { "Event", "DeviceId", "DeviceConfig", }); internal_static_context_LinkId_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_context_LinkId_fieldAccessorTable = new diff --git a/src/automation/target/generated-sources/grpc/context/ContextService.java b/src/automation/target/generated-sources/grpc/context/ContextService.java index d54c56057ca53e40071490d3b9aa313a13a77665..814ea98b65370f8fd3ffd752c77bec04997a5dd6 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextService.java +++ b/src/automation/target/generated-sources/grpc/context/ContextService.java @@ -56,6 +56,8 @@ public interface ContextService extends MutinyService { io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> setService(context.ContextOuterClass.Service request); + io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request); + io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request); io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceIdList> listSliceIds(context.ContextOuterClass.ContextId request); @@ -66,6 +68,8 @@ public interface ContextService extends MutinyService { io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> setSlice(context.ContextOuterClass.Slice request); + io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request); + io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request); io.smallrye.mutiny.Uni<context.ContextOuterClass.ConnectionIdList> listConnectionIds(context.ContextOuterClass.ServiceId request); diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java b/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java index f552294b8e6d645af41cc30632ae0432504bbc67..2b0099f106265e34d1f60bb3e0ecdc35f81895ee 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java +++ b/src/automation/target/generated-sources/grpc/context/ContextServiceBean.java @@ -208,6 +208,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI } } @Override + public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) { + try { + return delegate.unsetService(request); + } catch (UnsupportedOperationException e) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + } + @Override public 
io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) { try { return delegate.removeService(request); @@ -248,6 +256,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI } } @Override + public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) { + try { + return delegate.unsetSlice(request); + } catch (UnsupportedOperationException e) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + } + @Override public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) { try { return delegate.removeSlice(request); diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java b/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java index c6493bd4d381967238e5eb87dd717f679d028526..c518a0b4622522728e0eb22fdbeb80442b10f7ef 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java +++ b/src/automation/target/generated-sources/grpc/context/ContextServiceClient.java @@ -117,6 +117,10 @@ public class ContextServiceClient implements ContextService, MutinyClient<Mutiny return stub.setService(request); } @Override + public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) { + return stub.unsetService(request); + } + @Override public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) { return stub.removeService(request); } @@ -137,6 +141,10 @@ public class ContextServiceClient implements ContextService, MutinyClient<Mutiny return stub.setSlice(request); } @Override + public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) { + return stub.unsetSlice(request); + } + @Override public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) { return stub.removeSlice(request); } diff --git a/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java b/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java index be720c127439e50f68c2518332f85f750d6579ee..f59378086c84d0776cc25fb7aa9640403b072c0f 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java +++ b/src/automation/target/generated-sources/grpc/context/ContextServiceGrpc.java @@ -882,6 +882,37 @@ public final class ContextServiceGrpc { return getSetServiceMethod; } + private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.Service, + context.ContextOuterClass.ServiceId> getUnsetServiceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UnsetService", + requestType = context.ContextOuterClass.Service.class, + responseType = context.ContextOuterClass.ServiceId.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor<context.ContextOuterClass.Service, + context.ContextOuterClass.ServiceId> getUnsetServiceMethod() { + io.grpc.MethodDescriptor<context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId> getUnsetServiceMethod; + if ((getUnsetServiceMethod = ContextServiceGrpc.getUnsetServiceMethod) == null) { + synchronized (ContextServiceGrpc.class) { + if ((getUnsetServiceMethod = 
ContextServiceGrpc.getUnsetServiceMethod) == null) { + ContextServiceGrpc.getUnsetServiceMethod = getUnsetServiceMethod = + io.grpc.MethodDescriptor.<context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId>newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UnsetService")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.Service.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.ServiceId.getDefaultInstance())) + .setSchemaDescriptor(new ContextServiceMethodDescriptorSupplier("UnsetService")) + .build(); + } + } + } + return getUnsetServiceMethod; + } + private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.ServiceId, context.ContextOuterClass.Empty> getRemoveServiceMethod; @@ -1068,6 +1099,37 @@ public final class ContextServiceGrpc { return getSetSliceMethod; } + private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.Slice, + context.ContextOuterClass.SliceId> getUnsetSliceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UnsetSlice", + requestType = context.ContextOuterClass.Slice.class, + responseType = context.ContextOuterClass.SliceId.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor<context.ContextOuterClass.Slice, + context.ContextOuterClass.SliceId> getUnsetSliceMethod() { + io.grpc.MethodDescriptor<context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId> getUnsetSliceMethod; + if ((getUnsetSliceMethod = ContextServiceGrpc.getUnsetSliceMethod) == null) { + synchronized (ContextServiceGrpc.class) { + if ((getUnsetSliceMethod = ContextServiceGrpc.getUnsetSliceMethod) == null) { + ContextServiceGrpc.getUnsetSliceMethod = getUnsetSliceMethod = + io.grpc.MethodDescriptor.<context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId>newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UnsetSlice")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.Slice.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + context.ContextOuterClass.SliceId.getDefaultInstance())) + .setSchemaDescriptor(new ContextServiceMethodDescriptorSupplier("UnsetSlice")) + .build(); + } + } + } + return getUnsetSliceMethod; + } + private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.SliceId, context.ContextOuterClass.Empty> getRemoveSliceMethod; @@ -1560,6 +1622,13 @@ public final class ContextServiceGrpc { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getSetServiceMethod(), responseObserver); } + /** + */ + public void unsetService(context.ContextOuterClass.Service request, + io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId> responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUnsetServiceMethod(), responseObserver); + } + /** */ public void removeService(context.ContextOuterClass.ServiceId request, @@ -1602,6 +1671,13 @@ public final class ContextServiceGrpc { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getSetSliceMethod(), responseObserver); } + /** + */ + public void unsetSlice(context.ContextOuterClass.Slice request, + 
io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId> responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUnsetSliceMethod(), responseObserver); + } + /** */ public void removeSlice(context.ContextOuterClass.SliceId request, @@ -1856,6 +1932,13 @@ public final class ContextServiceGrpc { context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId>( this, METHODID_SET_SERVICE))) + .addMethod( + getUnsetServiceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Service, + context.ContextOuterClass.ServiceId>( + this, METHODID_UNSET_SERVICE))) .addMethod( getRemoveServiceMethod(), io.grpc.stub.ServerCalls.asyncUnaryCall( @@ -1898,6 +1981,13 @@ public final class ContextServiceGrpc { context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId>( this, METHODID_SET_SLICE))) + .addMethod( + getUnsetSliceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Slice, + context.ContextOuterClass.SliceId>( + this, METHODID_UNSET_SLICE))) .addMethod( getRemoveSliceMethod(), io.grpc.stub.ServerCalls.asyncUnaryCall( @@ -2196,6 +2286,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetServiceMethod(), getCallOptions()), request, responseObserver); } + /** + */ + public void unsetService(context.ContextOuterClass.Service request, + io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId> responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUnsetServiceMethod(), getCallOptions()), request, responseObserver); + } + /** */ public void removeService(context.ContextOuterClass.ServiceId request, @@ -2244,6 +2342,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetSliceMethod(), getCallOptions()), request, responseObserver); } + /** + */ + public void unsetSlice(context.ContextOuterClass.Slice request, + io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId> responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUnsetSliceMethod(), getCallOptions()), request, responseObserver); + } + /** */ public void removeSlice(context.ContextOuterClass.SliceId request, @@ -2523,6 +2629,13 @@ public final class ContextServiceGrpc { getChannel(), getSetServiceMethod(), getCallOptions(), request); } + /** + */ + public context.ContextOuterClass.ServiceId unsetService(context.ContextOuterClass.Service request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUnsetServiceMethod(), getCallOptions(), request); + } + /** */ public context.ContextOuterClass.Empty removeService(context.ContextOuterClass.ServiceId request) { @@ -2566,6 +2679,13 @@ public final class ContextServiceGrpc { getChannel(), getSetSliceMethod(), getCallOptions(), request); } + /** + */ + public context.ContextOuterClass.SliceId unsetSlice(context.ContextOuterClass.Slice request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUnsetSliceMethod(), getCallOptions(), request); + } + /** */ public context.ContextOuterClass.Empty removeSlice(context.ContextOuterClass.SliceId request) { @@ -2831,6 +2951,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetServiceMethod(), getCallOptions()), request); } + /** + */ + public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.ServiceId> unsetService( + context.ContextOuterClass.Service request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + 
getChannel().newCall(getUnsetServiceMethod(), getCallOptions()), request); + } + /** */ public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.Empty> removeService( @@ -2871,6 +2999,14 @@ public final class ContextServiceGrpc { getChannel().newCall(getSetSliceMethod(), getCallOptions()), request); } + /** + */ + public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.SliceId> unsetSlice( + context.ContextOuterClass.Slice request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUnsetSliceMethod(), getCallOptions()), request); + } + /** */ public com.google.common.util.concurrent.ListenableFuture<context.ContextOuterClass.Empty> removeSlice( @@ -2948,20 +3084,22 @@ public final class ContextServiceGrpc { private static final int METHODID_LIST_SERVICES = 25; private static final int METHODID_GET_SERVICE = 26; private static final int METHODID_SET_SERVICE = 27; - private static final int METHODID_REMOVE_SERVICE = 28; - private static final int METHODID_GET_SERVICE_EVENTS = 29; - private static final int METHODID_LIST_SLICE_IDS = 30; - private static final int METHODID_LIST_SLICES = 31; - private static final int METHODID_GET_SLICE = 32; - private static final int METHODID_SET_SLICE = 33; - private static final int METHODID_REMOVE_SLICE = 34; - private static final int METHODID_GET_SLICE_EVENTS = 35; - private static final int METHODID_LIST_CONNECTION_IDS = 36; - private static final int METHODID_LIST_CONNECTIONS = 37; - private static final int METHODID_GET_CONNECTION = 38; - private static final int METHODID_SET_CONNECTION = 39; - private static final int METHODID_REMOVE_CONNECTION = 40; - private static final int METHODID_GET_CONNECTION_EVENTS = 41; + private static final int METHODID_UNSET_SERVICE = 28; + private static final int METHODID_REMOVE_SERVICE = 29; + private static final int METHODID_GET_SERVICE_EVENTS = 30; + private static final int METHODID_LIST_SLICE_IDS = 31; + private static final int METHODID_LIST_SLICES = 32; + private static final int METHODID_GET_SLICE = 33; + private static final int METHODID_SET_SLICE = 34; + private static final int METHODID_UNSET_SLICE = 35; + private static final int METHODID_REMOVE_SLICE = 36; + private static final int METHODID_GET_SLICE_EVENTS = 37; + private static final int METHODID_LIST_CONNECTION_IDS = 38; + private static final int METHODID_LIST_CONNECTIONS = 39; + private static final int METHODID_GET_CONNECTION = 40; + private static final int METHODID_SET_CONNECTION = 41; + private static final int METHODID_REMOVE_CONNECTION = 42; + private static final int METHODID_GET_CONNECTION_EVENTS = 43; private static final class MethodHandlers<Req, Resp> implements io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>, @@ -3092,6 +3230,10 @@ public final class ContextServiceGrpc { serviceImpl.setService((context.ContextOuterClass.Service) request, (io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId>) responseObserver); break; + case METHODID_UNSET_SERVICE: + serviceImpl.unsetService((context.ContextOuterClass.Service) request, + (io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId>) responseObserver); + break; case METHODID_REMOVE_SERVICE: serviceImpl.removeService((context.ContextOuterClass.ServiceId) request, (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver); @@ -3116,6 +3258,10 @@ public final class ContextServiceGrpc { serviceImpl.setSlice((context.ContextOuterClass.Slice) request, 
(io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId>) responseObserver); break; + case METHODID_UNSET_SLICE: + serviceImpl.unsetSlice((context.ContextOuterClass.Slice) request, + (io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId>) responseObserver); + break; case METHODID_REMOVE_SLICE: serviceImpl.removeSlice((context.ContextOuterClass.SliceId) request, (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver); @@ -3237,12 +3383,14 @@ public final class ContextServiceGrpc { .addMethod(getListServicesMethod()) .addMethod(getGetServiceMethod()) .addMethod(getSetServiceMethod()) + .addMethod(getUnsetServiceMethod()) .addMethod(getRemoveServiceMethod()) .addMethod(getGetServiceEventsMethod()) .addMethod(getListSliceIdsMethod()) .addMethod(getListSlicesMethod()) .addMethod(getGetSliceMethod()) .addMethod(getSetSliceMethod()) + .addMethod(getUnsetSliceMethod()) .addMethod(getRemoveSliceMethod()) .addMethod(getGetSliceEventsMethod()) .addMethod(getListConnectionIdsMethod()) diff --git a/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java b/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java index 9f71b53786e40922546dc59cfd4328040a40bd7c..f7d2cb94e339366b54355c7e11b3ee72fa1e415c 100644 --- a/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java +++ b/src/automation/target/generated-sources/grpc/context/MutinyContextServiceGrpc.java @@ -156,6 +156,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) { + return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::unsetService); + } + + public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::removeService); } @@ -181,6 +186,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) { + return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::unsetSlice); + } + + public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::removeSlice); } @@ -383,6 +393,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + + public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -408,6 +423,11 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M } + public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) { + throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); + } + + public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) { throw new 
io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -670,6 +690,13 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M context.ContextOuterClass.Service, context.ContextOuterClass.ServiceId>( this, METHODID_SET_SERVICE, compression))) + .addMethod( + context.ContextServiceGrpc.getUnsetServiceMethod(), + asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Service, + context.ContextOuterClass.ServiceId>( + this, METHODID_UNSET_SERVICE, compression))) .addMethod( context.ContextServiceGrpc.getRemoveServiceMethod(), asyncUnaryCall( @@ -712,6 +739,13 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M context.ContextOuterClass.Slice, context.ContextOuterClass.SliceId>( this, METHODID_SET_SLICE, compression))) + .addMethod( + context.ContextServiceGrpc.getUnsetSliceMethod(), + asyncUnaryCall( + new MethodHandlers< + context.ContextOuterClass.Slice, + context.ContextOuterClass.SliceId>( + this, METHODID_UNSET_SLICE, compression))) .addMethod( context.ContextServiceGrpc.getRemoveSliceMethod(), asyncUnaryCall( @@ -800,20 +834,22 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M private static final int METHODID_LIST_SERVICES = 25; private static final int METHODID_GET_SERVICE = 26; private static final int METHODID_SET_SERVICE = 27; - private static final int METHODID_REMOVE_SERVICE = 28; - private static final int METHODID_GET_SERVICE_EVENTS = 29; - private static final int METHODID_LIST_SLICE_IDS = 30; - private static final int METHODID_LIST_SLICES = 31; - private static final int METHODID_GET_SLICE = 32; - private static final int METHODID_SET_SLICE = 33; - private static final int METHODID_REMOVE_SLICE = 34; - private static final int METHODID_GET_SLICE_EVENTS = 35; - private static final int METHODID_LIST_CONNECTION_IDS = 36; - private static final int METHODID_LIST_CONNECTIONS = 37; - private static final int METHODID_GET_CONNECTION = 38; - private static final int METHODID_SET_CONNECTION = 39; - private static final int METHODID_REMOVE_CONNECTION = 40; - private static final int METHODID_GET_CONNECTION_EVENTS = 41; + private static final int METHODID_UNSET_SERVICE = 28; + private static final int METHODID_REMOVE_SERVICE = 29; + private static final int METHODID_GET_SERVICE_EVENTS = 30; + private static final int METHODID_LIST_SLICE_IDS = 31; + private static final int METHODID_LIST_SLICES = 32; + private static final int METHODID_GET_SLICE = 33; + private static final int METHODID_SET_SLICE = 34; + private static final int METHODID_UNSET_SLICE = 35; + private static final int METHODID_REMOVE_SLICE = 36; + private static final int METHODID_GET_SLICE_EVENTS = 37; + private static final int METHODID_LIST_CONNECTION_IDS = 38; + private static final int METHODID_LIST_CONNECTIONS = 39; + private static final int METHODID_GET_CONNECTION = 40; + private static final int METHODID_SET_CONNECTION = 41; + private static final int METHODID_REMOVE_CONNECTION = 42; + private static final int METHODID_GET_CONNECTION_EVENTS = 43; private static final class MethodHandlers<Req, Resp> implements io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>, @@ -1002,6 +1038,12 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M compression, serviceImpl::setService); break; + case METHODID_UNSET_SERVICE: + io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Service) request, + (io.grpc.stub.StreamObserver<context.ContextOuterClass.ServiceId>) 
responseObserver, + compression, + serviceImpl::unsetService); + break; case METHODID_REMOVE_SERVICE: io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.ServiceId) request, (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver, @@ -1038,6 +1080,12 @@ public final class MutinyContextServiceGrpc implements io.quarkus.grpc.runtime.M compression, serviceImpl::setSlice); break; + case METHODID_UNSET_SLICE: + io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Slice) request, + (io.grpc.stub.StreamObserver<context.ContextOuterClass.SliceId>) responseObserver, + compression, + serviceImpl::unsetSlice); + break; case METHODID_REMOVE_SLICE: io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.SliceId) request, (io.grpc.stub.StreamObserver<context.ContextOuterClass.Empty>) responseObserver, diff --git a/src/automation/target/kubernetes/kubernetes.yml b/src/automation/target/kubernetes/kubernetes.yml index 1fc788787ff527647cb920ffa74b270171ab1b6d..8bc14b935b4e4f4a18ed03f10cca0b74f480dcf0 100644 --- a/src/automation/target/kubernetes/kubernetes.yml +++ b/src/automation/target/kubernetes/kubernetes.yml @@ -3,20 +3,19 @@ apiVersion: v1 kind: Service metadata: annotations: - app.quarkus.io/commit-id: 80cfc0874138153f72a2a673fc4d040be707e899 - app.quarkus.io/build-timestamp: 2022-08-31 - 09:25:37 +0000 + app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000 labels: app.kubernetes.io/name: automationservice app: automationservice name: automationservice spec: ports: - - name: http - port: 8080 - targetPort: 8080 - name: grpc port: 5050 targetPort: 5050 + - name: http + port: 8080 + targetPort: 8080 selector: app.kubernetes.io/name: automationservice type: ClusterIP @@ -25,8 +24,7 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - app.quarkus.io/commit-id: 80cfc0874138153f72a2a673fc4d040be707e899 - app.quarkus.io/build-timestamp: 2022-08-31 - 09:25:37 +0000 + app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000 labels: app: automationservice app.kubernetes.io/name: automationservice @@ -39,8 +37,7 @@ spec: template: metadata: annotations: - app.quarkus.io/commit-id: 80cfc0874138153f72a2a673fc4d040be707e899 - app.quarkus.io/build-timestamp: 2022-08-31 - 09:25:37 +0000 + app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000 labels: app: automationservice app.kubernetes.io/name: automationservice @@ -52,9 +49,9 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CONTEXT_SERVICE_HOST - value: ContextService + value: contextservice - name: DEVICE_SERVICE_HOST - value: DeviceService + value: deviceservice image: registry.gitlab.com/teraflow-h2020/controller/automation:0.2.0 imagePullPolicy: Always livenessProbe: @@ -69,12 +66,12 @@ spec: timeoutSeconds: 10 name: automationservice ports: - - containerPort: 8080 - name: http - protocol: TCP - containerPort: 5050 name: grpc protocol: TCP + - containerPort: 8080 + name: http + protocol: TCP readinessProbe: failureThreshold: 3 httpGet: diff --git a/src/device/requirements.in b/src/device/requirements.in index 10506fbd42c5b7a64afb3cc7c6ea32e0f1fa49f6..9c8c0ef18f3bcd4a92180465d11cd465c4336d44 100644 --- a/src/device/requirements.in +++ b/src/device/requirements.in @@ -10,6 +10,9 @@ pytz==2021.3 redis==4.1.2 requests==2.27.1 xmltodict==0.12.0 +tabulate +ipaddress +macaddress # pip's dependency resolver does not take into account installed packages. 
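Illustrative sketch (not part of the patch): the generated gRPC sources above add UnsetService and UnsetSlice as unary RPCs of ContextService. A minimal Python client call might look as follows, assuming Python stubs generated from proto/context.proto with protoc; the module names context_pb2/context_pb2_grpc are protoc defaults and the endpoint address is hypothetical.

import grpc
import context_pb2, context_pb2_grpc  # assumed protoc-generated modules

def unset_service(endpoint: str, service: 'context_pb2.Service') -> 'context_pb2.ServiceId':
    # UnsetService is a unary RPC: it takes a Service and returns its ServiceId.
    with grpc.insecure_channel(endpoint) as channel:
        stub = context_pb2_grpc.ContextServiceStub(channel)
        return stub.UnsetService(service)

# Example (hypothetical endpoint):
# service_id = unset_service('contextservice:1010', context_pb2.Service())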
# p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 6189816bcd35dd973e4a7da389f256bdb685a79f..9ffd028a67a34cfcce7a737a5817128126941759 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -223,7 +223,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): running_config_rules = driver.GetConfig() running_config_rules = [ (ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True)) - for config_rule in running_config_rules + for config_rule in running_config_rules if not isinstance(config_rule[1], Exception) ] #for running_config_rule in running_config_rules: # LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule))) diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py index 7dbb9eddb238dcaae9d00b579a1851aacf53225d..371f4cccb4e002e4d232823e47e31f577d1a4285 100644 --- a/src/device/service/driver_api/_Driver.py +++ b/src/device/service/driver_api/_Driver.py @@ -15,16 +15,18 @@ import threading from typing import Any, Iterator, List, Optional, Tuple, Union -# Special resource names to request to the driver to retrieve the specified configuration/structural resources. +# Special resource names to request to the driver to retrieve the specified +# configuration/structural resources. # These resource names should be used with GetConfig() method. -RESOURCE_ENDPOINTS = '__endpoints__' -RESOURCE_INTERFACES = '__interfaces__' +RESOURCE_ENDPOINTS = '__endpoints__' +RESOURCE_INTERFACES = '__interfaces__' RESOURCE_NETWORK_INSTANCES = '__network_instances__' -RESOURCE_ROUTING_POLICIES = '__routing_policies__' -RESOURCE_ACL = '__acl__' +RESOURCE_ROUTING_POLICIES = '__routing_policies__' +RESOURCE_ACL = '__acl__' + class _Driver: - def __init__(self, address : str, port : int, **settings) -> None: + def __init__(self, address: str, port: int, **settings) -> None: """ Initialize Driver. Parameters: address : str @@ -56,92 +58,122 @@ class _Driver: """ Retrieve initial configuration of entire device. Returns: values : List[Tuple[str, Any]] - List of tuples (resource key, resource value) for resource keys. + List of tuples (resource key, resource value) for + resource keys. """ raise NotImplementedError() - def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: - """ Retrieve running configuration of entire device, or selected resource keys. + def GetConfig(self, resource_keys: List[str] = []) -> \ + List[Tuple[str, Union[Any, None, Exception]]]: + """ Retrieve running configuration of entire device or + selected resource keys. Parameters: resource_keys : List[str] List of keys pointing to the resources to be retrieved. Returns: values : List[Tuple[str, Union[Any, None, Exception]]] - List of tuples (resource key, resource value) for resource keys requested. If a resource is found, - the appropriate value type must be retrieved. If a resource is not found, None must be retrieved as - value for that resource. In case of Exception, the Exception must be retrieved as value. + List of tuples (resource key, resource value) for + resource keys requested. If a resource is found, + the appropriate value type must be retrieved. + If a resource is not found, None must be retrieved as + value for that resource. 
In case of Exception, + the Exception must be retrieved as value. """ raise NotImplementedError() - def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + def SetConfig(self, resources: List[Tuple[str, Any]]) -> \ + List[Union[bool, Exception]]: """ Create/Update configuration for a list of resources. Parameters: resources : List[Tuple[str, Any]] - List of tuples, each containing a resource_key pointing the resource to be modified, and a - resource_value containing the new value to be set. + List of tuples, each containing a resource_key pointing the + resource to be modified, and a resource_value containing + the new value to be set. Returns: results : List[Union[bool, Exception]] - List of results for resource key changes requested. Return values must be in the same order than - resource keys requested. If a resource is properly set, True must be retrieved; otherwise, the - Exception that is raised during the processing must be retrieved. + List of results for resource key changes requested. + Return values must be in the same order as the + resource keys requested. If a resource is properly set, + True must be retrieved; otherwise, the Exception that is + raised during the processing must be retrieved. """ raise NotImplementedError() - def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> \ + List[Union[bool, Exception]]: """ Delete configuration for a list of resources. Parameters: resources : List[Tuple[str, Any]] - List of tuples, each containing a resource_key pointing the resource to be modified, and a - resource_value containing possible additionally required values to locate the value to be removed. + List of tuples, each containing a resource_key pointing the + resource to be modified, and a resource_value containing + possible additionally required values to locate + the value to be removed. Returns: - results : List[bool] - List of results for resource key deletions requested. Return values must be in the same order than - resource keys requested. If a resource is properly deleted, True must be retrieved; otherwise, the - Exception that is raised during the processing must be retrieved. + results : List[Union[bool, Exception]] + List of results for resource key deletions requested. + Return values must be in the same order as the resource keys + requested. If a resource is properly deleted, True must be + retrieved; otherwise, the Exception that is raised during + the processing must be retrieved. """ raise NotImplementedError() - def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: - """ Subscribe to state information of entire device, or selected resources. Subscriptions are incremental. + def SubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> \ + List[Union[bool, Exception]]: + """ Subscribe to state information of entire device or + selected resources. Subscriptions are incremental. Driver should keep track of requested resources. Parameters: subscriptions : List[Tuple[str, float, float]] - List of tuples, each containing a resource_key pointing the resource to be subscribed, a - sampling_duration, and a sampling_interval (both in seconds with float representation) defining, - respectively, for how long monitoring should last, and the desired monitoring interval for the - resource specified. 
+ List of tuples, each containing a resource_key pointing the + resource to be subscribed, a sampling_duration, and a + sampling_interval (both in seconds with float + representation) defining, respectively, for how long + monitoring should last, and the desired monitoring interval + for the resource specified. Returns: - results : List[bool] - List of results for resource key subscriptions requested. Return values must be in the same order - than resource keys requested. If a resource is properly subscribed, True must be retrieved; - otherwise, the Exception that is raised during the processing must be retrieved. + results : List[Union[bool, Exception]] + List of results for resource key subscriptions requested. + Return values must be in the same order as the resource keys + requested. If a resource is properly subscribed, + True must be retrieved; otherwise, the Exception that is + raised during the processing must be retrieved. """ raise NotImplementedError() - def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: - """ Unsubscribe from state information of entire device, or selected resources. Subscriptions are incremental. + def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]]) \ + -> List[Union[bool, Exception]]: + """ Unsubscribe from state information of entire device + or selected resources. Subscriptions are incremental. Driver should keep track of requested resources. Parameters: subscriptions : List[str] - List of tuples, each containing a resource_key pointing the resource to be subscribed, a - sampling_duration, and a sampling_interval (both in seconds with float representation) defining, - respectively, for how long monitoring should last, and the desired monitoring interval for the - resource specified. + List of tuples, each containing a resource_key pointing the + resource to be subscribed, a sampling_duration, and a + sampling_interval (both in seconds with float + representation) defining, respectively, for how long + monitoring should last, and the desired monitoring interval + for the resource specified. Returns: results : List[Union[bool, Exception]] - List of results for resource key unsubscriptions requested. Return values must be in the same order - than resource keys requested. If a resource is properly unsubscribed, True must be retrieved; - otherwise, the Exception that is raised during the processing must be retrieved. + List of results for resource key un-subscriptions requested. + Return values must be in the same order as the resource keys + requested. If a resource is properly unsubscribed, + True must be retrieved; otherwise, the Exception that is + raised during the processing must be retrieved. """ raise NotImplementedError() def GetState( self, blocking=False, terminate : Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: - """ Retrieve last collected values for subscribed resources. Operates as a generator, so this method should be - called once and will block until values are available. When values are available, it should yield each of - them and block again until new values are available. When the driver is destroyed, GetState() can return - instead of yield to terminate the loop. Terminate enables to request interruption of the generation. + """ Retrieve last collected values for subscribed resources. + Operates as a generator, so this method should be called once and will + block until values are available. 
When values are available, + it should yield each of them and block again until new values are + available. When the driver is destroyed, GetState() can return instead + of yield to terminate the loop. + Terminate enables to request interruption of the generation. Examples: # keep looping waiting for extra samples (generator loop) terminate = threading.Event() @@ -161,20 +193,27 @@ class _Driver: if i == 10: terminate.set() Parameters: blocking : bool - Select the driver behaviour. In both cases, the driver will first retrieve the samples accumulated - and available in the internal queue. Then, if blocking, the driver does not terminate the loop and - waits for additional samples to come, thus behaving as a generator. If non-blocking, the driver - terminates the loop and returns. Non-blocking behaviour can be used for periodically polling the - driver, while blocking can be used when a separate thread is in charge of collecting the samples - produced by the driver. + Select the driver behaviour. In both cases, the driver will + first retrieve the samples accumulated and available in the + internal queue. Then, if blocking, the driver does not + terminate the loop and waits for additional samples to come, + thus behaving as a generator. If non-blocking, the driver + terminates the loop and returns. Non-blocking behaviour can + be used for periodically polling the driver, while blocking + can be used when a separate thread is in charge of + collecting the samples produced by the driver. terminate : threading.Event - Signals the interruption of the GetState method as soon as possible. + Signals the interruption of the GetState method as soon as + possible. Returns: results : Iterator[Tuple[float, str, Any]] - Sequences of state sample. Each State sample contains a float Unix-like timestamps of the samples in - seconds with up to microsecond resolution, the resource_key of the sample, and its resource_value. - Only resources with an active subscription must be retrieved. Interval and duration of the sampling - process are specified when creating the subscription using method SubscribeState(). Order of values - yielded is arbitrary. + Sequences of state sample. Each State sample contains a + float Unix-like timestamps of the samples in seconds with up + to microsecond resolution, the resource_key of the sample, + and its resource_value. + Only resources with an active subscription must be + retrieved. Interval and duration of the sampling process are + specified when creating the subscription using method + SubscribeState(). Order of values yielded is arbitrary. """ raise NotImplementedError() diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index 75e315b37a41edacf6f200575eae5dc44c51d642..821a70418be7d323c5b1279c30a62fd9691e9e3f 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -12,15 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os from common.DeviceTypes import DeviceTypeEnum from ..driver_api.FilterFields import FilterFieldEnum, ORM_DeviceDriverEnum -from .emulated.EmulatedDriver import EmulatedDriver -from .openconfig.OpenConfigDriver import OpenConfigDriver -from .transport_api.TransportApiDriver import TransportApiDriver -from .p4.p4_driver import P4Driver -from .microwave.IETFApiDriver import IETFApiDriver -DRIVERS = [ +TRUE_VALUES = {'T', 'TRUE', 'YES', '1'} +DEVICE_EMULATED_ONLY = os.environ.get('DEVICE_EMULATED_ONLY') +LOAD_ALL_DEVICE_DRIVERS = (DEVICE_EMULATED_ONLY is None) or (DEVICE_EMULATED_ONLY.upper() not in TRUE_VALUES) + +DRIVERS = [] + +from .emulated.EmulatedDriver import EmulatedDriver # pylint: disable=wrong-import-position +DRIVERS.append( (EmulatedDriver, [ { # Driver==unspecified & no device type specified => use Emulated @@ -31,7 +34,6 @@ DRIVERS = [ FilterFieldEnum.DEVICE_TYPE: [ DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM, DeviceTypeEnum.EMULATED_PACKET_ROUTER, - DeviceTypeEnum.PACKET_ROUTER, # temporal ECOC'22 ], FilterFieldEnum.DRIVER : [ ORM_DeviceDriverEnum.UNDEFINED, @@ -39,32 +41,47 @@ DRIVERS = [ ORM_DeviceDriverEnum.TRANSPORT_API ], } - ]), - #(OpenConfigDriver, [ # temporal ECOC'22 - # { - # # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver - # FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER, - # FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.OPENCONFIG, - # } - #]), - (TransportApiDriver, [ - { - # Real OLS, specifying TAPI Driver => use TransportApiDriver - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.TRANSPORT_API, - } - ]), - #(P4Driver, [ # temporal ECOC'22 - # { - # # Real P4 Switch, specifying P4 Driver => use P4Driver - # FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH, - # FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.P4, - # } - #]), - #(IETFApiDriver, [ # temporal ECOC'22 - # { - # FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM, - # FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, - # } - #]), -] + ])) + +if LOAD_ALL_DEVICE_DRIVERS: + from .openconfig.OpenConfigDriver import OpenConfigDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (OpenConfigDriver, [ + { + # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER, + FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.OPENCONFIG, + } + ])) + +if LOAD_ALL_DEVICE_DRIVERS: + from .transport_api.TransportApiDriver import TransportApiDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (TransportApiDriver, [ + { + # Real OLS, specifying TAPI Driver => use TransportApiDriver + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM, + FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.TRANSPORT_API, + } + ])) + +if LOAD_ALL_DEVICE_DRIVERS: + from .p4.p4_driver import P4Driver # pylint: disable=wrong-import-position + DRIVERS.append( + (P4Driver, [ + { + # Real P4 Switch, specifying P4 Driver => use P4Driver + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH, + FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.P4, + } + ])) + +if LOAD_ALL_DEVICE_DRIVERS: + from .microwave.IETFApiDriver import IETFApiDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (IETFApiDriver, [ + { + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM, + FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, + } + ])) diff --git 
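Illustrative sketch (not part of the patch): the drivers/__init__.py change above gates the non-emulated drivers behind the DEVICE_EMULATED_ONLY environment variable. The standalone helper below restates that check (the function name is made up for illustration); setting the variable to any of T/TRUE/YES/1, case-insensitively, keeps only the EmulatedDriver registered.

import os

TRUE_VALUES = {'T', 'TRUE', 'YES', '1'}

def load_all_device_drivers(var_name: str = 'DEVICE_EMULATED_ONLY') -> bool:
    # All drivers are loaded unless the variable is set to a truthy value.
    value = os.environ.get(var_name)
    return (value is None) or (value.upper() not in TRUE_VALUES)

if __name__ == '__main__':
    os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
    assert load_all_device_drivers() is False   # only EmulatedDriver would be loaded
    del os.environ['DEVICE_EMULATED_ONLY']
    assert load_all_device_drivers() is True    # all drivers would be loaded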
a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index c35ae9b9dc610572f77f8e139c5c6f0c76f48e77..9342e650b9fadb21fa1b65fb951a08ae6f066a3c 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -61,6 +61,7 @@ class NetconfSessionHandler: self.__port = int(port) self.__username = settings.get('username') self.__password = settings.get('password') + self.__vendor = settings.get('vendor') self.__key_filename = settings.get('key_filename') self.__hostkey_verify = settings.get('hostkey_verify', True) self.__look_for_keys = settings.get('look_for_keys', True) @@ -94,6 +95,9 @@ class NetconfSessionHandler: @property def commit_per_rule(self): return self.__commit_per_delete + @property + def vendor(self): return self.__vendor + @RETRY_DECORATOR def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin with self.__lock: @@ -186,7 +190,8 @@ def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples : def edit_config( netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False, - target='running', default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin + target='running', default_operation='merge', test_option=None, error_option=None, + format='xml' # pylint: disable=redefined-builtin ): str_method = 'DeleteConfig' if delete else 'SetConfig' LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources))) @@ -199,7 +204,8 @@ def edit_config( chk_length(str_resource_name, resource, min_length=2, max_length=2) resource_key,resource_value = resource chk_string(str_resource_name + '.key', resource_key, allow_empty=False) - str_config_message = compose_config(resource_key, resource_value, delete=delete) + str_config_message = compose_config( + resource_key, resource_value, delete=delete, vendor=netconf_handler.vendor) if str_config_message is None: raise UnsupportedResourceKeyException(resource_key) LOGGER.info('[{:s}] str_config_message[{:d}] = {:s}'.format( str_method, len(str_config_message), str(str_config_message))) diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py index 8b1587a0d59672ff08f777568feff776aa35cbd1..8399402fa76b8b6b00829493cc8ebd28fd6018f4 100644 --- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py +++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py @@ -145,4 +145,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: iip_ap['name'], iip_ap['export_policy']) response.append((resource_key, iip_ap)) + + + return response diff --git a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py index 369732de3fe58c52a2e9ab2227899160d091ff68..068ca5430d9135e784dbe9a07f80d81472cbf5cc 100644 --- a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py +++ b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py @@ -74,7 +74,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: resource_key = '/routing_policy/bgp_defined_set[{:s}]'.format(bgp_ext_community_set['ext_community_set_name']) response.append((resource_key, copy.deepcopy(bgp_ext_community_set))) - ext_community_member = 
xml_bgp_ext_community_set.find('ocbp:ext-community-member', namespaces=NAMESPACES) + ext_community_member = xml_bgp_ext_community_set.find('ocbp:config/ocbp:ext-community-member', namespaces=NAMESPACES) if ext_community_member is not None and ext_community_member.text is not None: add_value_from_tag(bgp_ext_community_set, 'ext_community_member', ext_community_member) diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py index 901f5cf0291dca1bda155e20abd16db5989df7dc..5e77b25fe3206407db9427085de70b95342d370a 100644 --- a/src/device/service/drivers/openconfig/templates/__init__.py +++ b/src/device/service/drivers/openconfig/templates/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. import json, logging, lxml.etree as ET, re -from typing import Any, Dict +from typing import Any, Dict, Optional from jinja2 import Environment, PackageLoader, select_autoescape from device.service.driver_api._Driver import ( RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_ACL) @@ -77,9 +77,11 @@ def parse(resource_key : str, xml_data : ET.Element): if parser is None: return [(resource_key, xml_data)] return parser(xml_data) -def compose_config(resource_key : str, resource_value : str, delete : bool = False) -> str: +def compose_config( + resource_key : str, resource_value : str, delete : bool = False, vendor : Optional[str] = None +) -> str: template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key)) template = JINJA_ENV.get_template(template_name) data : Dict[str, Any] = json.loads(resource_value) operation = 'delete' if delete else 'merge' - return '<config>{:s}</config>'.format(template.render(**data, operation=operation).strip()) + return '<config>{:s}</config>'.format(template.render(**data, operation=operation, vendor=vendor).strip()) diff --git a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml index fac259b6fdcd3cbded93088ddc6335ea2bfe5f69..2769e8b2e9f81326332ae175f915432b7337f24c 100644 --- a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml @@ -13,6 +13,16 @@ <config> <sequence-id>{{sequence_id}}</sequence-id> </config> + {% if operation is not defined or operation != 'delete' %} + {% if type=='ACL_L2' %} + <l2> + <config> + {% if source_address is defined %}<source-mac>{{source_address}}</source-mac>{% endif%} + {% if destination_address is defined %}<destination-mac>{{destination_address}}</destination-mac>{% endif%} + </config> + </l2> + {% endif%} + {% if type=='ACL_IPV4' %} <ipv4> <config> {% if source_address is defined %}<source-address>{{source_address}}</source-address>{% endif%} @@ -29,12 +39,26 @@ {% if tcp_flags is defined %}<tcp-flags>{{tcp_flags}}</tcp-flags>{% endif%} </config> </transport> + {% endif%} + {% if type=='ACL_IPV6' %} + <ipv6> + <config> + {% if source_address is defined %}<source-address>{{source_address}}</source-address>{% endif%} + {% if destination_address is defined %}<destination-address>{{destination_address}}</destination-address>{% endif%} + {% if protocol is defined %}<protocol>{{protocol}}</protocol>{% endif%} + {% if dscp is defined %}<dscp>{{dscp}}</dscp>{% endif%} + {% if hop_limit is defined %}<hop-limit>{{hop_limit}}</hop-limit>{% 
endif%} + </config> + </ipv6> + {% endif%} + <actions> <config> {% if forwarding_action is defined %}<forwarding-action>{{forwarding_action}}</forwarding-action>{% endif%} {% if log_action is defined %}<log-action>{{log_action}}</log-action>{% endif%} </config> </actions> + {% endif%} </acl-entry> </acl-entries> </acl-set> diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml index d987b0cc4b40298533f140f71af83c6fad884020..b070b305a505890c51f3751d2b83eb415ae4aa43 100644 --- a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml @@ -1,18 +1,21 @@ <acl xmlns="http://openconfig.net/yang/acl"> <interfaces> - <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> + <interface {% if operation is defined %}{% if all is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}> <id>{{id}}</id> <config> <id>{{id}}</id> </config> + {% if interface is defined %} <interface-ref> <config> <interface>{{interface}}</interface> {% if subinterface is defined %}<subinterface>{{subinterface}}</subinterface>{% endif%} </config> </interface-ref> + {% endif%} + {% if set_name_egress is defined %} <egress-acl-sets> - <egress-acl-set> + <egress-acl-set {% if operation is defined %}{% if egress is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}>> <set-name>{{set_name_egress}}</set-name> <type>{{type_egress}}</type> <config> @@ -21,6 +24,7 @@ </config> </egress-acl-set> </egress-acl-sets> + {% endif%} </interface> </interfaces> </acl> diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml index 144a03c55477e532379541be5443063fe3aa2f10..d1f18efb26bc1316354c2bb26623cb36f7dc0be6 100644 --- a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml @@ -1,18 +1,21 @@ <acl xmlns="http://openconfig.net/yang/acl"> <interfaces> - <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> + <interface {% if operation is defined %}{% if all is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}> <id>{{id}}</id> <config> <id>{{id}}</id> </config> + {% if interface is defined %} <interface-ref> <config> <interface>{{interface}}</interface> {% if subinterface is defined %}<subinterface>{{subinterface}}</subinterface>{% endif%} </config> </interface-ref> + {% endif%} + {% if set_name_ingress is defined %} <ingress-acl-sets> - <ingress-acl-set> + <ingress-acl-set {% if operation is defined %}{% if ingress is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %} {% endif %}> <set-name>{{set_name_ingress}}</set-name> <type>{{type_ingress}}</type> <config> @@ -21,6 +24,7 @@ </config> </ingress-acl-set> </ingress-acl-sets> + {% endif%} </interface> </interfaces> </acl> diff --git a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml 
b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml index ff15d1d682ea910208237c32adcc93029fb036d8..4bc53ff1ddfbebbdcef2a0b4c37770210726676b 100644 --- a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml @@ -1,14 +1,12 @@ <interfaces xmlns="http://openconfig.net/yang/interfaces"> - <interface{% if operation is defined and operation != 'delete' %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> + <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> <name>{{name}}</name> + {% if operation is defined and operation != 'delete' %} <config> <name>{{name}}</name> - {% if operation is defined and operation == 'delete' %} <description></description> - {% else %} - <description>{{description}}</description> <mtu>{{mtu}}</mtu> - {% endif %} </config> + {% endif %} </interface> </interfaces> diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml index 17b7708fac6bd920bb6c6e01feb526d59f15f608..1bdb8efbff495f04ee90dadaffaa7412332531b7 100644 --- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml @@ -4,9 +4,9 @@ <name>{{name}}</name> <config> <name>{{name}}</name> - <type xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">ianaift:{{type}}</type> - {% if mtu is defined %}<mtu>{{mtu}}</mtu>{% endif%} - <enabled>true</enabled> + <type xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">ianaift:{{type}}</type> + {% if mtu is defined %}<mtu>{{mtu}}</mtu>{% endif%} + <enabled>true</enabled> </config> <subinterfaces> <subinterface> @@ -14,7 +14,11 @@ <config> <index>{{index}}</index> <description>{{description}}</description> + {% if vendor=="ADVA" and vlan_id is not defined %} + <untagged-allowed xmlns="http://www.advaoptical.com/cim/adva-dnos-oc-interfaces">true</untagged-allowed> + {% endif%} </config> + {% if vlan_id is defined %} <vlan xmlns="http://openconfig.net/yang/vlan"> <match> <single-tagged> @@ -24,7 +28,8 @@ </single-tagged> </match> </vlan> - {% if address_ip is defined %} + {% endif %} + {% if address_ip is defined %} <oc-ip:ipv4> <oc-ip:addresses> <oc-ip:address> diff --git a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml index 1944778c6fc4fcee96e79f3ce93ce044869226e5..17b07df7233e94f16923c5da49eef2b8b5ccda82 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml @@ -20,7 +20,7 @@ {% endif %} {% if type=='L2VSI' %} {% if description is defined %}<description>{{description}}</description>{% endif %} - <enabled>false</enabled> + <enabled>true</enabled> <mtu>1500</mtu> </config> <encapsulation> diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml index d5c33d31a6d671216db55c0eded94dc15a56bec8..bf8c0c0770f9344fbed16f3a6b09f7fa99a978ef 100644 --- 
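Illustrative sketch (not part of the patch): compose_config() now threads the NETCONF session's vendor setting into template.render(), and the subinterface template above uses it to emit the ADVA-specific untagged-allowed leaf when no vlan_id is supplied. The snippet below reproduces only that conditional with an inline jinja2 Template; it is not the repository's template loader.

from jinja2 import Template

SNIPPET = Template(
    '<config>'
    '{% if vendor == "ADVA" and vlan_id is not defined %}'
    '<untagged-allowed xmlns="http://www.advaoptical.com/cim/adva-dnos-oc-interfaces">true</untagged-allowed>'
    '{% endif %}'
    '</config>'
)

print(SNIPPET.render(vendor='ADVA'))               # leaf emitted: untagged subinterface on ADVA
print(SNIPPET.render(vendor='ADVA', vlan_id=100))  # leaf omitted: a single-tagged VLAN is configured
print(SNIPPET.render(vendor=None))                 # leaf omitted: other vendors keep the plain config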
a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml @@ -2,15 +2,13 @@ <network-instance> <name>{{name}}</name> <interfaces> - <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> + <interface> <id>{{id}}</id> - {% if operation is not defined or operation != 'delete' %} <config> <id>{{id}}</id> <interface>{{interface}}</interface> <subinterface>{{subinterface}}</subinterface> </config> - {% endif %} </interface> </interfaces> </network-instance> diff --git a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml index da05d0467605e6cec0c3448cc325ff60dfc7cfc9..c9c068e480c0569cfe5f97b78b28fbe03e2595f8 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml @@ -3,19 +3,19 @@ <name>{{name}}</name> <protocols> <protocol{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> - <identifier>{{identifier}}</identifier> + <identifier xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</identifier> <name>{{protocol_name}}</name> {% if operation is not defined or operation != 'delete' %} <config> - <identifier>{{identifier}}</identifier> + <identifier xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</identifier> <name>{{protocol_name}}</name> - <enabled>true</enabled> </config> {% if identifier=='BGP' %} <bgp> <global> <config> <as>{{as}}</as> + <router-id>{{router_id}}</router-id> </config> </global> </bgp> @@ -23,5 +23,18 @@ {% endif %} </protocol> </protocols> + {% if operation is not defined or operation != 'delete' %} + + <tables> + <table> + <protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</protocol> + <address-family xmlns:oc-types="http://openconfig.net/yang/openconfig-types">oc-types:IPV4</address-family> + <config> + <protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</protocol> + <address-family xmlns:oc-types="http://openconfig.net/yang/openconfig-types">oc-types:IPV4</address-family> + </config> + </table> + </tables> + {% endif %} </network-instance> </network-instances> diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml index df64606ae5ab434e5e3453f7294db02bb749bdce..6843c2dcbd306b149a4168565447d11174eceadc 100644 --- a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml @@ -5,7 +5,10 @@ <ext-community-set{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> <ext-community-set-name>{{ext_community_set_name}}</ext-community-set-name> {% if operation is not defined or operation != 'delete' %} - {% if ext_community_member is defined %} <ext-community-member>{{ext_community_member}}</ext-community-member>{% endif %} 
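The protocols template above now qualifies the protocol identifier with the `oc-pol-types` namespace prefix, adds the BGP `<router-id>`, and emits a `<tables>` entry for non-delete operations. A rendering sketch for a BGP instance (values illustrative); note that `as` is a Python keyword, so it has to be passed through a dict rather than as a keyword argument:

```python
# Sketch: render the network-instance protocols template above for BGP.
# Values are illustrative; variable names come from the template.
from jinja2 import Template

PATH = ("src/device/service/drivers/openconfig/templates/"
        "network_instance/protocols/edit_config.xml")
with open(PATH, encoding="utf-8") as f:
    template = Template(f.read())

params = {
    "name": "VRF-1",          # network-instance name
    "identifier": "BGP",      # rendered as oc-pol-types:BGP
    "protocol_name": "BGP",
    "as": 65000,              # 'as' is a reserved word, hence the dict
    "router_id": "10.0.0.1",
}
print(template.render(**params))   # no 'operation' -> config + <tables> emitted
```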
+ <config> + <ext-community-set-name>{{ext_community_set_name}}</ext-community-set-name> + <ext-community-member>{{ext_community_member}}</ext-community-member> + </config> {% endif %} </ext-community-set> </ext-community-sets> diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml index 711067f424b68da0e69913ce01f5133c5cbbfe02..eda2d99c9f6299f7345767db8bed8e8cc58284ae 100644 --- a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml @@ -1,8 +1,11 @@ -{% if operation is not defined or operation != 'delete' %} <routing-policy xmlns="http://openconfig.net/yang/routing-policy"> <policy-definitions> - <policy-definition> + <policy-definition {% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}> <name>{{policy_name}}</name> + {% if operation is not defined or operation != 'delete' %} + <config> + <name>{{policy_name}}</name> + </config> <statements> <statement> <name>{{statement_name}}</name> @@ -10,11 +13,13 @@ <name>{{statement_name}}</name> </config> <conditions> + <config> + <install-protocol-eq xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:DIRECTLY_CONNECTED</install-protocol-eq> + </config> <bgp-conditions xmlns="http://openconfig.net/yang/bgp-policy"> - <match-ext-community-set> + <config> <ext-community-set>{{ext_community_set_name}}</ext-community-set> - <match-set-options>{{match_set_options}}</match-set-options> - </match-ext-community-set> + </config> </bgp-conditions> </conditions> <actions> @@ -24,7 +29,7 @@ </actions> </statement> </statements> + {% endif %} </policy-definition> </policy-definitions> </routing-policy> -{% endif %} diff --git a/src/device/service/drivers/p4/__init__.py b/src/device/service/drivers/p4/__init__.py index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644 --- a/src/device/service/drivers/p4/__init__.py +++ b/src/device/service/drivers/p4/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/src/device/service/drivers/p4/p4_client.py b/src/device/service/drivers/p4/p4_client.py new file mode 100644 index 0000000000000000000000000000000000000000..600d08880c7e8a1d6a7238e60d66a87d7167bd8c --- /dev/null +++ b/src/device/service/drivers/p4/p4_client.py @@ -0,0 +1,607 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4Runtime client. 
+""" + +import logging +import queue +import sys +import enum +import threading +from functools import wraps +from typing import NamedTuple +import grpc +import google.protobuf.text_format +from google.rpc import status_pb2, code_pb2 + +from p4.v1 import p4runtime_pb2 +from p4.v1 import p4runtime_pb2_grpc + +STREAM_ATTR_ARBITRATION = "arbitration" +STREAM_ATTR_PACKET = "packet" +STREAM_ATTR_DIGEST = "digest" +STREAM_ATTR_IDLE_NOT = "idle_timeout_notification" +STREAM_ATTR_UNKNOWN = "unknown" + +LOGGER = logging.getLogger(__name__) + + +class P4RuntimeErrorFormatException(Exception): + """ + P4Runtime error format exception. + """ + + +# Used to iterate over the p4.Error messages in a gRPC error Status object +class P4RuntimeErrorIterator: + """ + P4Runtime error iterator. + + Attributes + ---------- + grpc_error : object + gRPC error + """ + + def __init__(self, grpc_error): + assert grpc_error.code() == grpc.StatusCode.UNKNOWN + self.grpc_error = grpc_error + + error = None + # The gRPC Python package does not have a convenient way to access the + # binary details for the error: they are treated as trailing metadata. + for meta in self.grpc_error.trailing_metadata(): + if meta[0] == "grpc-status-details-bin": + error = status_pb2.Status() + error.ParseFromString(meta[1]) + break + if error is None: + raise P4RuntimeErrorFormatException("No binary details field") + + if len(error.details) == 0: + raise P4RuntimeErrorFormatException( + "Binary details field has empty Any details repeated field") + self.errors = error.details + self.idx = 0 + + def __iter__(self): + return self + + def __next__(self): + while self.idx < len(self.errors): + p4_error = p4runtime_pb2.Error() + one_error_any = self.errors[self.idx] + if not one_error_any.Unpack(p4_error): + raise P4RuntimeErrorFormatException( + "Cannot convert Any message to p4.Error") + if p4_error.canonical_code == code_pb2.OK: + continue + val = self.idx, p4_error + self.idx += 1 + return val + raise StopIteration + + +class P4RuntimeWriteException(Exception): + """ + P4Runtime write exception handler. + + Attributes + ---------- + grpc_error : object + gRPC error + """ + + def __init__(self, grpc_error): + assert grpc_error.code() == grpc.StatusCode.UNKNOWN + super().__init__() + self.errors = [] + try: + error_iterator = P4RuntimeErrorIterator(grpc_error) + for error_tuple in error_iterator: + self.errors.append(error_tuple) + except P4RuntimeErrorFormatException as ex: + raise P4RuntimeException(grpc_error) from ex + + def __str__(self): + message = "Error(s) during Write:\n" + for idx, p4_error in self.errors: + code_name = code_pb2._CODE.values_by_number[ + p4_error.canonical_code].name + message += f"\t* At index {idx}: {code_name}, " \ + f"'{p4_error.message}'\n" + return message + + +class P4RuntimeException(Exception): + """ + P4Runtime exception handler. + + Attributes + ---------- + grpc_error : object + gRPC error + """ + + def __init__(self, grpc_error): + super().__init__() + self.grpc_error = grpc_error + + def __str__(self): + message = f"P4Runtime RPC error ({self.grpc_error.code().name}): " \ + f"{self.grpc_error.details()}" + return message + + +def parse_p4runtime_write_error(func): + """ + Parse P4Runtime write error. 
+ + :param func: function + :return: parsed error + """ + + @wraps(func) + def handle(*args, **kwargs): + try: + return func(*args, **kwargs) + except grpc.RpcError as ex: + if ex.code() != grpc.StatusCode.UNKNOWN: + raise ex + raise P4RuntimeWriteException(ex) from None + + return handle + + +def parse_p4runtime_error(func): + """ + Parse P4Runtime error. + + :param func: function + :return: parsed error + """ + + @wraps(func) + def handle(*args, **kwargs): + try: + return func(*args, **kwargs) + except grpc.RpcError as ex: + raise P4RuntimeException(ex) from None + + return handle + + +class SSLOptions(NamedTuple): + """ + Tuple of SSL options. + """ + insecure: bool + cacert: str = None + cert: str = None + key: str = None + + +def read_pem_file(path): + """ + Load and read PEM file. + + :param path: path to PEM file + :return: file descriptor + """ + try: + with open(path, "rb") as f_d: + return f_d.read() + except (FileNotFoundError, IOError, OSError): + logging.critical("Cannot read from PEM file '%s'", path) + sys.exit(1) + + +@enum.unique +class WriteOperation(enum.Enum): + """ + Write Operations. + """ + insert = 1 + update = 2 + delete = 3 + + +def select_operation(mode): + """ + Select P4 operation based upon the operation mode. + + :param mode: operation mode + :return: P4 operation protobuf object + """ + if mode == WriteOperation.insert: + return p4runtime_pb2.Update.INSERT + if mode == WriteOperation.update: + return p4runtime_pb2.Update.UPDATE + if mode == WriteOperation.delete: + return p4runtime_pb2.Update.DELETE + return None + + +def select_entity_type(entity, update): + """ + Select P4 entity type for an update. + + :param entity: P4 entity object + :param update: update operation + :return: the correct update entity or None + """ + if isinstance(entity, p4runtime_pb2.TableEntry): + return update.entity.table_entry + if isinstance(entity, p4runtime_pb2.ActionProfileGroup): + return update.entity.action_profile_group + if isinstance(entity, p4runtime_pb2.ActionProfileMember): + return update.entity.action_profile_member + return None + + +class P4RuntimeClient: + """ + P4Runtime client. + + Attributes + ---------- + device_id : int + P4 device ID + grpc_address : str + IP address and port + election_id : tuple + Mastership election ID + role_name : str + Role name (optional) + ssl_options: tuple + SSL options" named tuple (optional) + """ + + def __init__(self, device_id, grpc_address, + election_id, role_name=None, ssl_options=None): + self.device_id = device_id + self.election_id = election_id + self.role_name = role_name + if ssl_options is None: + self.ssl_options = SSLOptions(True) + else: + self.ssl_options = ssl_options + LOGGER.debug( + "Connecting to device %d at %s", device_id, grpc_address) + + if self.ssl_options.insecure: + logging.debug("Using insecure channel") + self.channel = grpc.insecure_channel(grpc_address) + else: + # root certificates are retrieved from a default location + # chosen by gRPC runtime unless the user provides + # custom certificates. 
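For reference, a minimal sketch of how a caller would instantiate this client, both over an insecure channel and with TLS material passed through `SSLOptions` (address, device ID and certificate paths are illustrative; the constructor connects and performs the arbitration handshake immediately):

```python
# Sketch: instantiate the P4Runtime client defined in this module.
# The gRPC address, device ID and certificate paths are illustrative.
from p4_client import P4RuntimeClient, SSLOptions

# Insecure channel (the default when ssl_options is omitted).
client = P4RuntimeClient(
    device_id=1, grpc_address="192.168.1.10:9559", election_id=(1, 0))

# TLS channel: CA certificate, client certificate and key in PEM format.
tls_client = P4RuntimeClient(
    device_id=1, grpc_address="192.168.1.10:9559", election_id=(1, 0),
    ssl_options=SSLOptions(insecure=False, cacert="/etc/tls/ca.pem",
                           cert="/etc/tls/client.pem", key="/etc/tls/client-key.pem"))
```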
+ root_certificates = None + if self.ssl_options.cacert is not None: + root_certificates = read_pem_file(self.ssl_options.cacert) + certificate_chain = None + if self.ssl_options.cert is not None: + certificate_chain = read_pem_file(self.ssl_options.cert) + private_key = None + if self.ssl_options.key is not None: + private_key = read_pem_file(self.ssl_options.key) + creds = grpc.ssl_channel_credentials(root_certificates, private_key, + certificate_chain) + self.channel = grpc.secure_channel(grpc_address, creds) + self.stream_in_q = None + self.stream_out_q = None + self.stream = None + self.stream_recv_thread = None + self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel) + + try: + self.set_up_stream() + except P4RuntimeException: + LOGGER.critical("Failed to connect to P4Runtime server") + sys.exit(1) + LOGGER.info("P4Runtime client is successfully invoked") + + def set_up_stream(self): + """ + Set up a gRPC stream. + """ + self.stream_out_q = queue.Queue() + # queues for different messages + self.stream_in_q = { + STREAM_ATTR_ARBITRATION: queue.Queue(), + STREAM_ATTR_PACKET: queue.Queue(), + STREAM_ATTR_DIGEST: queue.Queue(), + STREAM_ATTR_IDLE_NOT: queue.Queue(), + STREAM_ATTR_UNKNOWN: queue.Queue(), + } + + def stream_req_iterator(): + while True: + stream_p = self.stream_out_q.get() + if stream_p is None: + break + yield stream_p + + def stream_recv_wrapper(stream): + @parse_p4runtime_error + def stream_recv(): + for stream_p in stream: + if stream_p.HasField("arbitration"): + self.stream_in_q["arbitration"].put(stream_p) + elif stream_p.HasField("packet"): + self.stream_in_q["packet"].put(stream_p) + elif stream_p.HasField("digest"): + self.stream_in_q["digest"].put(stream_p) + else: + self.stream_in_q["unknown"].put(stream_p) + + try: + stream_recv() + except P4RuntimeException as ex: + logging.critical("StreamChannel error, closing stream") + logging.critical(ex) + for k in self.stream_in_q: + self.stream_in_q[k].put(None) + + self.stream = self.stub.StreamChannel(stream_req_iterator()) + self.stream_recv_thread = threading.Thread( + target=stream_recv_wrapper, args=(self.stream,)) + self.stream_recv_thread.start() + self.handshake() + + def handshake(self): + """ + Handshake with gRPC server. + """ + + req = p4runtime_pb2.StreamMessageRequest() + arbitration = req.arbitration + arbitration.device_id = self.device_id + election_id = arbitration.election_id + election_id.high = self.election_id[0] + election_id.low = self.election_id[1] + if self.role_name is not None: + arbitration.role.name = self.role_name + self.stream_out_q.put(req) + + rep = self.get_stream_packet(STREAM_ATTR_ARBITRATION, timeout=2) + if rep is None: + logging.critical("Failed to establish session with server") + sys.exit(1) + is_primary = (rep.arbitration.status.code == code_pb2.OK) + logging.debug("Session established, client is '%s'", + "primary" if is_primary else "backup") + if not is_primary: + print("You are not the primary client," + "you only have read access to the server") + + def get_stream_packet(self, type_, timeout=1): + """ + Get a new message from the stream. + + :param type_: stream type. + :param timeout: time to wait. + :return: message or None + """ + if type_ not in self.stream_in_q: + print("Unknown stream type 's"'', type_) + return None + try: + msg = self.stream_in_q[type_].get(timeout=timeout) + return msg + except queue.Empty: # timeout expired + return None + + @parse_p4runtime_error + def get_p4info(self): + """ + Retrieve P4Info content. + + :return: P4Info object. 
+ """ + logging.debug("Retrieving P4Info file") + req = p4runtime_pb2.GetForwardingPipelineConfigRequest() + req.device_id = self.device_id + req.response_type = \ + p4runtime_pb2.GetForwardingPipelineConfigRequest.P4INFO_AND_COOKIE + rep = self.stub.GetForwardingPipelineConfig(req) + return rep.config.p4info + + @parse_p4runtime_error + def set_fwd_pipe_config(self, p4info_path, bin_path): + """ + Configure the pipeline. + + :param p4info_path: path to the P4Info file + :param bin_path: path to the binary file + :return: + """ + logging.debug("Setting forwarding pipeline config") + req = p4runtime_pb2.SetForwardingPipelineConfigRequest() + req.device_id = self.device_id + if self.role_name is not None: + req.role = self.role_name + election_id = req.election_id + election_id.high = self.election_id[0] + election_id.low = self.election_id[1] + req.action = \ + p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT + with open(p4info_path, "r", encoding="utf-8") as f_info: + with open(bin_path, "rb") as f_bin: + try: + google.protobuf.text_format.Merge( + f_info.read(), req.config.p4info) + except google.protobuf.text_format.ParseError: + logging.error("Error when parsing P4Info") + raise + req.config.p4_device_config = f_bin.read() + return self.stub.SetForwardingPipelineConfig(req) + + def tear_down(self): + """ + Tear connection with the gRPC server down. + """ + if self.stream_out_q: + logging.debug("Cleaning up stream") + self.stream_out_q.put(None) + if self.stream_in_q: + for k in self.stream_in_q: + self.stream_in_q[k].put(None) + if self.stream_recv_thread: + self.stream_recv_thread.join() + self.channel.close() + # avoid a race condition if channel deleted when process terminates + del self.channel + + @parse_p4runtime_write_error + def __write(self, entity, mode=WriteOperation.insert): + """ + Perform a write operation. + + :param entity: P4 entity to write + :param mode: operation mode (defaults to insert) + :return: void + """ + if isinstance(entity, (list, tuple)): + for ent in entity: + self.__write(ent) + return + req = self.__get_new_write_request() + update = req.updates.add() + update.type = select_operation(mode) + msg_entity = select_entity_type(entity, update) + if not msg_entity: + msg = f"{mode.name} operation for entity {entity.__name__}" \ + f"not supported" + raise P4RuntimeWriteException(msg) + msg_entity.CopyFrom(entity) + self.__simple_write(req) + + def __get_new_write_request(self): + """ + Create a new write request message. + + :return: write request message + """ + req = p4runtime_pb2.WriteRequest() + req.device_id = self.device_id + if self.role_name is not None: + req.role = self.role_name + election_id = req.election_id + election_id.high = self.election_id[0] + election_id.low = self.election_id[1] + return req + + @parse_p4runtime_write_error + def __simple_write(self, req): + """ + Send a write operation into the wire. + + :param req: write operation request + :return: void + """ + try: + return self.stub.Write(req) + except grpc.RpcError as ex: + if ex.code() != grpc.StatusCode.UNKNOWN: + raise ex + raise P4RuntimeWriteException(ex) from ex + + @parse_p4runtime_write_error + def insert(self, entity): + """ + Perform an insert write operation. + + :param entity: P4 entity to insert + :return: void + """ + return self.__write(entity, WriteOperation.insert) + + @parse_p4runtime_write_error + def update(self, entity): + """ + Perform an update write operation. 
+ + :param entity: P4 entity to update + :return: void + """ + return self.__write(entity, WriteOperation.update) + + @parse_p4runtime_write_error + def delete(self, entity): + """ + Perform a delete write operation. + + :param entity: P4 entity to delete + :return: void + """ + return self.__write(entity, WriteOperation.delete) + + @parse_p4runtime_write_error + def write(self, req): + """ + Write device operation. + + :param req: write request message + :return: status + """ + req.device_id = self.device_id + if self.role_name is not None: + req.role = self.role_name + election_id = req.election_id + election_id.high = self.election_id[0] + election_id.low = self.election_id[1] + return self.__simple_write(req) + + @parse_p4runtime_write_error + def write_update(self, update): + """ + Update device operation. + + :param update: update request message + :return: status + """ + req = self.__get_new_write_request() + req.updates.extend([update]) + return self.__simple_write(req) + + # Decorator is useless here: in case of server error, + # the exception is raised during the iteration (when next() is called). + @parse_p4runtime_error + def read_one(self, entity): + """ + Read device operation. + + :param entity: P4 entity for which the read is issued + :return: status + """ + req = p4runtime_pb2.ReadRequest() + if self.role_name is not None: + req.role = self.role_name + req.device_id = self.device_id + req.entities.extend([entity]) + return self.stub.Read(req) + + @parse_p4runtime_error + def api_version(self): + """ + P4Runtime API version. + + :return: API version hex + """ + req = p4runtime_pb2.CapabilitiesRequest() + rep = self.stub.Capabilities(req) + return rep.p4runtime_api_version diff --git a/src/device/service/drivers/p4/p4_common.py b/src/device/service/drivers/p4/p4_common.py new file mode 100644 index 0000000000000000000000000000000000000000..bcafedc1f613bfe1d1739d72f89803155b720155 --- /dev/null +++ b/src/device/service/drivers/p4/p4_common.py @@ -0,0 +1,445 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This package contains several helper functions for encoding to and decoding from +byte strings: +- integers +- IPv4 address strings +- IPv6 address strings +- Ethernet address strings +as well as static variables used by various P4 driver components. 
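Reads go through `read_one()`, which streams `ReadResponse` messages back; a wildcard `TableEntry` carrying only a `table_id` asks for every entry of that table. A short sketch, reusing the client from the previous examples (table ID illustrative):

```python
# Sketch: read back all entries of one table and query the API version.
from p4.v1 import p4runtime_pb2

wildcard = p4runtime_pb2.Entity()
wildcard.table_entry.table_id = 33572104        # illustrative table ID
for response in client.read_one(wildcard):      # stream of ReadResponse
    for entity in response.entities:
        print(entity.table_entry)

print("P4Runtime API version:", client.api_version())
```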
+""" + +import logging +import math +import re +import socket +import ipaddress +from ctypes import c_uint16, sizeof +import macaddress + +from common.type_checkers.Checkers import chk_type +try: + from .p4_exception import UserBadValueError +except ImportError: + from p4_exception import UserBadValueError + +P4_ATTR_DEV_ID = "id" +P4_ATTR_DEV_NAME = "name" +P4_ATTR_DEV_VENDOR = "vendor" +P4_ATTR_DEV_HW_VER = "hw_ver" +P4_ATTR_DEV_SW_VER = "sw_ver" +P4_ATTR_DEV_P4BIN = "p4bin" +P4_ATTR_DEV_P4INFO = "p4info" +P4_ATTR_DEV_TIMEOUT = "timeout" + +P4_VAL_DEF_VENDOR = "Unknown" +P4_VAL_DEF_HW_VER = "BMv2 simple_switch" +P4_VAL_DEF_SW_VER = "Stratum" +P4_VAL_DEF_TIMEOUT = 60 + + +# Logger instance +LOGGER = logging.getLogger(__name__) + + +# MAC address encoding/decoding +mac_pattern = re.compile(r"^([\da-fA-F]{2}:){5}([\da-fA-F]{2})$") + + +def matches_mac(mac_addr_string): + """ + Check whether input string is a valid MAC address or not. + + :param mac_addr_string: string-based MAC address + :return: boolean status + """ + return mac_pattern.match(mac_addr_string) is not None + + +def encode_mac(mac_addr_string): + """ + Convert string-based MAC address into bytes. + + :param mac_addr_string: string-based MAC address + :return: MAC address in bytes + """ + return bytes(macaddress.MAC(mac_addr_string)) + + +def decode_mac(encoded_mac_addr): + """ + Convert a MAC address in bytes into string-based MAC address. + + :param encoded_mac_addr: MAC address in bytes + :return: string-based MAC address + """ + return str(macaddress.MAC(encoded_mac_addr)).replace("-", ":").lower() + + +# IP address encoding/decoding +IPV4_LOCALHOST = "localhost" + + +def matches_ipv4(ip_addr_string): + """ + Check whether input string is a valid IPv4 address or not. + + :param ip_addr_string: string-based IPv4 address + :return: boolean status + """ + if ip_addr_string == IPV4_LOCALHOST: + return True + try: + addr = ipaddress.ip_address(ip_addr_string) + return isinstance(addr, ipaddress.IPv4Address) + except ValueError: + return False + + +def encode_ipv4(ip_addr_string): + """ + Convert string-based IPv4 address into bytes. + + :param ip_addr_string: string-based IPv4 address + :return: IPv4 address in bytes + """ + return socket.inet_aton(ip_addr_string) + + +def decode_ipv4(encoded_ip_addr): + """ + Convert an IPv4 address in bytes into string-based IPv4 address. + + :param encoded_ip_addr: IPv4 address in bytes + :return: string-based IPv4 address + """ + return socket.inet_ntoa(encoded_ip_addr) + + +def matches_ipv6(ip_addr_string): + """ + Check whether input string is a valid IPv6 address or not. + + :param ip_addr_string: string-based IPv6 address + :return: boolean status + """ + try: + addr = ipaddress.ip_address(ip_addr_string) + return isinstance(addr, ipaddress.IPv6Address) + except ValueError: + return False + + +def encode_ipv6(ip_addr_string): + """ + Convert string-based IPv6 address into bytes. + + :param ip_addr_string: string-based IPv6 address + :return: IPv6 address in bytes + """ + return socket.inet_pton(socket.AF_INET6, ip_addr_string) + + +def decode_ipv6(encoded_ip_addr): + """ + Convert an IPv6 address in bytes into string-based IPv6 address. + + :param encoded_ip_addr: IPv6 address in bytes + :return: string-based IPv6 address + """ + return str(ipaddress.ip_address(encoded_ip_addr)) + + +# Numerical encoding/decoding + + +def limits(c_int_type): + """ + Discover limits of numerical type. 
+ + :param c_int_type: numerical type + :return: tuple of numerical type's limits + """ + signed = c_int_type(-1).value < c_int_type(0).value + bit_size = sizeof(c_int_type) * 8 + signed_limit = 2 ** (bit_size - 1) + return (-signed_limit, signed_limit - 1) \ + if signed else (0, 2 * signed_limit - 1) + + +def valid_port(port): + """ + Check whether input is a valid port number or not. + + :param port: port number + :return: boolean status + """ + lim = limits(c_uint16) + return lim[0] <= port <= lim[1] + + +def bitwidth_to_bytes(bitwidth): + """ + Convert number of bits to number of bytes. + + :param bitwidth: number of bits + :return: number of bytes + """ + return int(math.ceil(bitwidth / 8.0)) + + +def encode_num(number, bitwidth): + """ + Convert number into bytes. + + :param number: number to convert + :param bitwidth: number of bits + :return: number in bytes + """ + byte_len = bitwidth_to_bytes(bitwidth) + return number.to_bytes(byte_len, byteorder="big") + + +def decode_num(encoded_number): + """ + Convert number in bytes into its numerical form. + + :param encoded_number: number in bytes to convert + :return: numerical number form + """ + return int.from_bytes(encoded_number, "big") + + +# Umbrella encoder + + +def encode(variable, bitwidth): + """ + Tries to infer the type of `input` and encode it. + + :param variable: target variable + :param bitwidth: size of variable in bits + :return: encoded bytes + """ + byte_len = bitwidth_to_bytes(bitwidth) + if isinstance(variable, (list, tuple)) and len(variable) == 1: + variable = variable[0] + + if isinstance(variable, int): + encoded_bytes = encode_num(variable, bitwidth) + elif isinstance(variable, str): + if matches_mac(variable): + encoded_bytes = encode_mac(variable) + elif matches_ipv4(variable): + encoded_bytes = encode_ipv4(variable) + elif matches_ipv6(variable): + encoded_bytes = encode_ipv6(variable) + else: + try: + value = int(variable, 0) + except ValueError as ex: + raise UserBadValueError( + f"Invalid value '{variable}': " + "could not cast to integer, try in hex with 0x prefix")\ + from ex + encoded_bytes = value.to_bytes(byte_len, byteorder="big") + else: + raise Exception( + f"Encoding objects of {type(variable)} is not supported") + assert len(encoded_bytes) == byte_len + return encoded_bytes + + +# Parsers + + +def get_match_field_value(match_field): + """ + Retrieve the value of a certain match field by name. + + :param match_field: match field + :return: match filed value + """ + match_type = match_field.WhichOneof("field_match_type") + if match_type == "valid": + return match_field.valid.value + if match_type == "exact": + return match_field.exact.value + if match_type == "lpm": + return match_field.lpm.value, match_field.lpm.prefix_len + if match_type == "ternary": + return match_field.ternary.value, match_field.ternary.mask + if match_type == "range": + return match_field.range.low, match_field.range.high + raise Exception(f"Unsupported match type with type {match_type}") + + +def parse_resource_string_from_json(resource, resource_str="table-name"): + """ + Parse a given resource name within a JSON-based object. 
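The umbrella `encode()` helper infers the value type (integer, MAC, IPv4, IPv6 or hex string) and checks that the result matches the requested bit width. A few grounded examples (same standalone-import assumption as above):

```python
# Sketch: typical uses of the numeric helpers and the umbrella encode().
from p4_common import encode, decode_num, bitwidth_to_bytes, valid_port

encode(1, 9)                      # 9-bit port number -> b'\x00\x01'
encode("aa:bb:cc:dd:ee:ff", 48)   # MAC address -> 6 bytes
encode("10.0.0.1", 32)            # IPv4 address -> 4 bytes
encode("0x0800", 16)              # hex string cast to int -> b'\x08\x00'

decode_num(b"\x08\x00")           # -> 2048
bitwidth_to_bytes(9)              # -> 2
valid_port(9559)                  # -> True (fits an unsigned 16-bit integer)
```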
+ + :param resource: JSON-based object + :param resource_str: resource string to parse + :return: value of the parsed resource string + """ + if not resource or (resource_str not in resource): + LOGGER.warning("JSON entry misses '%s' attribute", resource_str) + return None + chk_type(resource_str, resource[resource_str], str) + return resource[resource_str] + + +def parse_resource_number_from_json(resource, resource_nb): + """ + Parse a given resource number within a JSON-based object. + + :param resource: JSON-based object + :param resource_nb: resource number to parse + :return: value of the parsed resource number + """ + if not resource or (resource_nb not in resource): + LOGGER.warning( + "JSON entry misses '%s' attribute", resource_nb) + return None + chk_type(resource_nb, resource[resource_nb], int) + return resource[resource_nb] + + +def parse_resource_integer_from_json(resource, resource_nb): + """ + Parse a given integer number within a JSON-based object. + + :param resource: JSON-based object + :param resource_nb: resource number to parse + :return: value of the parsed resource number + """ + num = parse_resource_number_from_json(resource, resource_nb) + if num: + return int(num) + return -1 + + +def parse_resource_float_from_json(resource, resource_nb): + """ + Parse a given floating point number within a JSON-based object. + + :param resource: JSON-based object + :param resource_nb: resource number to parse + :return: value of the parsed resource number + """ + num = parse_resource_number_from_json(resource, resource_nb) + if num: + return float(num) + return -1.0 + + +def parse_resource_bytes_from_json(resource, resource_bytes): + """ + Parse given resource bytes within a JSON-based object. + + :param resource: JSON-based object + :param resource_bytes: resource bytes to parse + :return: value of the parsed resource bytes + """ + if not resource or (resource_bytes not in resource): + LOGGER.debug( + "JSON entry misses '%s' attribute", resource_bytes) + return None + + if resource_bytes in resource: + chk_type(resource_bytes, resource[resource_bytes], bytes) + return resource[resource_bytes] + return None + + +def parse_match_operations_from_json(resource): + """ + Parse the match operations within a JSON-based object. + + :param resource: JSON-based object + :return: map of match operations + """ + if not resource or ("match-fields" not in resource): + LOGGER.warning( + "JSON entry misses 'match-fields' list of attributes") + return {} + chk_type("match-fields", resource["match-fields"], list) + + match_map = {} + for mf_entry in resource["match-fields"]: + if ("match-field" not in mf_entry) or \ + ("match-value" not in mf_entry): + LOGGER.warning( + "JSON entry misses 'match-field' and/or " + "'match-value' attributes") + return None + chk_type("match-field", mf_entry["match-field"], str) + chk_type("match-value", mf_entry["match-value"], str) + match_map[mf_entry["match-field"]] = mf_entry["match-value"] + + return match_map + + +def parse_action_parameters_from_json(resource): + """ + Parse the action parameters within a JSON-based object. 
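The JSON parsers above (and the action-parameter parser that follows) expect rule objects keyed by `table-name`, `match-fields` and `action-params`. A sketch of the shape they consume (table and action names are illustrative):

```python
# Sketch: a JSON-shaped table rule consumed by the parsers in this module.
from p4_common import (parse_resource_string_from_json,
                       parse_match_operations_from_json,
                       parse_action_parameters_from_json)

rule = {
    "table-name": "IngressPipeImpl.l2_exact_table",
    "match-fields": [
        {"match-field": "hdr.ethernet.dst_addr",
         "match-value": "aa:bb:cc:dd:ee:ff"},
    ],
    "action-name": "IngressPipeImpl.set_egress_port",
    "action-params": [
        {"action-param": "port", "action-value": "1"},
    ],
}

table_name = parse_resource_string_from_json(rule, "table-name")
matches = parse_match_operations_from_json(rule)   # {'hdr.ethernet.dst_addr': 'aa:bb:...'}
params = parse_action_parameters_from_json(rule)   # {'port': '1'}
```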
+ + :param resource: JSON-based object + :return: map of action parameters + """ + if not resource or ("action-params" not in resource): + LOGGER.warning( + "JSON entry misses 'action-params' list of attributes") + return None + chk_type("action-params", resource["action-params"], list) + + action_name = parse_resource_string_from_json(resource, "action-name") + + action_params = {} + for ac_entry in resource["action-params"]: + if not ac_entry: + LOGGER.debug( + "Missing action parameter for action %s", action_name) + continue + chk_type("action-param", ac_entry["action-param"], str) + chk_type("action-value", ac_entry["action-value"], str) + action_params[ac_entry["action-param"]] = \ + ac_entry["action-value"] + + return action_params + + +def parse_integer_list_from_json(resource, resource_list, resource_item): + """ + Parse the list of integers within a JSON-based object. + + :param resource: JSON-based object + :param resource_list: name of the resource list + :param resource_item: name of the resource item + :return: list of integers + """ + if not resource or (resource_list not in resource): + LOGGER.warning( + "JSON entry misses '%s' list of attributes", resource_list) + return [] + chk_type(resource_list, resource[resource_list], list) + + integers_list = [] + for item in resource[resource_list]: + chk_type(resource_item, item[resource_item], int) + integers_list.append(item[resource_item]) + + return integers_list diff --git a/src/device/service/drivers/p4/p4_context.py b/src/device/service/drivers/p4/p4_context.py new file mode 100644 index 0000000000000000000000000000000000000000..ab01c422fe478cfe26c2f7331fc9b4653521db9f --- /dev/null +++ b/src/device/service/drivers/p4/p4_context.py @@ -0,0 +1,284 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Build some context around a given P4 info file. +""" + +from collections import Counter +import enum +from functools import partialmethod + + +@enum.unique +class P4Type(enum.Enum): + """ + P4 types. + """ + table = 1 + action = 2 + action_profile = 3 + counter = 4 + direct_counter = 5 + meter = 6 + direct_meter = 7 + controller_packet_metadata = 8 + + +P4Type.table.p4info_name = "tables" +P4Type.action.p4info_name = "actions" +P4Type.action_profile.p4info_name = "action_profiles" +P4Type.counter.p4info_name = "counters" +P4Type.direct_counter.p4info_name = "direct_counters" +P4Type.meter.p4info_name = "meters" +P4Type.direct_meter.p4info_name = "direct_meters" +P4Type.controller_packet_metadata.p4info_name = "controller_packet_metadata" + +for object_type in P4Type: + object_type.pretty_name = object_type.name.replace('_', ' ') + object_type.pretty_names = object_type.pretty_name + 's' + + +@enum.unique +class P4RuntimeEntity(enum.Enum): + """ + P4 runtime entities. 
+ """ + table_entry = 1 + action_profile_member = 2 + action_profile_group = 3 + meter_entry = 4 + direct_meter_entry = 5 + counter_entry = 6 + direct_counter_entry = 7 + packet_replication_engine_entry = 8 + + +class Context: + """ + P4 context. + """ + def __init__(self): + self.p4info = None + self.p4info_obj_map = {} + self.p4info_obj_map_by_id = {} + self.p4info_objs_by_type = {} + + def set_p4info(self, p4info): + """ + Set a p4 info file. + + :param p4info: p4 info file + :return: void + """ + self.p4info = p4info + self._import_p4info_names() + + def get_obj(self, obj_type, name): + """ + Retrieve an object by type and name. + + :param obj_type: P4 object type + :param name: P4 object name + :return: P4 object + """ + key = (obj_type, name) + return self.p4info_obj_map.get(key, None) + + def get_obj_id(self, obj_type, name): + """ + Retrieve a P4 object's ID by type and name. + + :param obj_type: P4 object type + :param name: P4 object name + :return: P4 object ID + """ + obj = self.get_obj(obj_type, name) + if obj is None: + return None + return obj.preamble.id + + def get_param(self, action_name, name): + """ + Get an action parameter by action name. + + :param action_name: P4 action name + :param name: action parameter name + :return: action parameter + """ + action = self.get_obj(P4Type.action, action_name) + if action is None: + return None + for param in action.params: + if param.name == name: + return param + return None + + def get_mf(self, table_name, name): + """ + Get a table's match field by name. + + :param table_name: P4 table name + :param name: match field name + :return: match field + """ + table = self.get_obj(P4Type.table, table_name) + if table is None: + return None + for match_field in table.match_fields: + if match_field.name == name: + return match_field + return None + + def get_param_id(self, action_name, name): + """ + Get an action parameter ID by the action and parameter names. + + :param action_name: P4 action name + :param name: action parameter name + :return: action parameter ID + """ + param = self.get_param(action_name, name) + return None if param is None else param.id + + def get_mf_id(self, table_name, name): + """ + Get a table's match field ID by name. + + :param table_name: P4 table name + :param name: match field name + :return: match field ID + """ + match_field = self.get_mf(table_name, name) + return None if match_field is None else match_field.id + + def get_param_name(self, action_name, id_): + """ + Get an action parameter name by the action name and action ID. + + :param action_name: P4 action name + :param id_: action parameter ID + :return: action parameter name + """ + action = self.get_obj(P4Type.action, action_name) + if action is None: + return None + for param in action.params: + if param.id == id_: + return param.name + return None + + def get_mf_name(self, table_name, id_): + """ + Get a table's match field name by ID. + + :param table_name: P4 table name + :param id_: match field ID + :return: match field name + """ + table = self.get_obj(P4Type.table, table_name) + if table is None: + return None + for match_field in table.match_fields: + if match_field.id == id_: + return match_field.name + return None + + def get_objs(self, obj_type): + """ + Get P4 objects by type. 
+ + :param obj_type: P4 object type + :return: list of tuples (object name, object) + """ + objects = self.p4info_objs_by_type[obj_type] + for name, obj in objects.items(): + yield name, obj + + def get_name_from_id(self, id_): + """ + Get P4 object name by its ID. + + :param id_: P4 object ID + :return: P4 object name + """ + return self.p4info_obj_map_by_id[id_].preamble.name + + def get_obj_by_id(self, id_): + """ + Get P4 object by its ID. + + :param id_: P4 object ID + :return: P4 object + """ + return self.p4info_obj_map_by_id[id_] + + def get_packet_metadata_name_from_id(self, ctrl_pkt_md_name, id_): + """ + Get packet metadata name by ID. + + :param ctrl_pkt_md_name: packet replication entity name + :param id_: packet metadata ID + :return: packet metadata name + """ + ctrl_pkt_md = self.get_obj( + P4Type.controller_packet_metadata, ctrl_pkt_md_name) + if not ctrl_pkt_md: + return None + for meta in ctrl_pkt_md.metadata: + if meta.id == id_: + return meta.name + return None + + # We accept any suffix that uniquely identifies the object + # among p4info objects of the same type. + def _import_p4info_names(self): + """ + Import p4 info into memory. + + :return: void + """ + suffix_count = Counter() + for obj_type in P4Type: + self.p4info_objs_by_type[obj_type] = {} + for obj in getattr(self.p4info, obj_type.p4info_name): + pre = obj.preamble + self.p4info_obj_map_by_id[pre.id] = obj + self.p4info_objs_by_type[obj_type][pre.name] = obj + suffix = None + for suf in reversed(pre.name.split(".")): + suffix = suf if suffix is None else suf + "." + suffix + key = (obj_type, suffix) + self.p4info_obj_map[key] = obj + suffix_count[key] += 1 + for key, cnt in suffix_count.items(): + if cnt > 1: + del self.p4info_obj_map[key] + + +# Add p4info object and object id "getters" for each object type; +# these are just wrappers around Context.get_obj and Context.get_obj_id. +# For example: get_table(x) and get_table_id(x) respectively call +# get_obj(P4Type.table, x) and get_obj_id(P4Type.table, x) +for object_type in P4Type: + object_name = "_".join(["get", object_type.name]) + setattr(Context, object_name, partialmethod( + Context.get_obj, object_type)) + object_name = "_".join(["get", object_type.name, "id"]) + setattr(Context, object_name, partialmethod( + Context.get_obj_id, object_type)) + +for object_type in P4Type: + object_name = "_".join(["get", object_type.p4info_name]) + setattr(Context, object_name, partialmethod(Context.get_objs, object_type)) diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py index af05952b313d1632eacd5962cc34c4aa1b6b5a10..069c07ce40e43192b74519b2175e7e10c638cd20 100644 --- a/src/device/service/drivers/p4/p4_driver.py +++ b/src/device/service/drivers/p4/p4_driver.py @@ -16,13 +16,22 @@ P4 driver plugin for the TeraFlow SDN controller. 
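The `partialmethod` wiring above generates per-type getters such as `get_table()`, `get_table_id()` and `get_tables()` on `Context`. A usage sketch with a P4Info parsed from a text file (file path and object names are illustrative):

```python
# Sketch: use the generated per-type getters of Context.
import google.protobuf.text_format
from p4.config.v1 import p4info_pb2
from p4_context import Context

p4info = p4info_pb2.P4Info()
with open("pipelines/main.p4info.txt", encoding="utf-8") as f:
    google.protobuf.text_format.Merge(f.read(), p4info)

context = Context()
context.set_p4info(p4info)

table_id = context.get_table_id("IngressPipeImpl.l2_exact_table")
action_id = context.get_action_id("IngressPipeImpl.set_egress_port")
mf_id = context.get_mf_id("IngressPipeImpl.l2_exact_table",
                          "hdr.ethernet.dst_addr")

for name, table in context.get_tables():   # all tables declared in the P4Info
    print(name, table.preamble.id)
```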
""" +import os +import json import logging import threading from typing import Any, Iterator, List, Optional, Tuple, Union -from .p4_util import P4RuntimeClient,\ +from common.type_checkers.Checkers import chk_type, chk_length, chk_string +from .p4_common import matches_ipv4, matches_ipv6, valid_port,\ P4_ATTR_DEV_ID, P4_ATTR_DEV_NAME, P4_ATTR_DEV_VENDOR,\ - P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER, P4_ATTR_DEV_PIPECONF,\ - P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER, P4_VAL_DEF_PIPECONF + P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER,\ + P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\ + P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER,\ + P4_VAL_DEF_TIMEOUT +from .p4_manager import P4Manager, get_api_version, KEY_TABLE,\ + KEY_ACTION_PROFILE, KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\ + KEY_CTL_PKT_METADATA +from .p4_client import WriteOperation try: from _Driver import _Driver @@ -53,208 +62,543 @@ class P4Driver(_Driver): Hardware version of the P4 device (Optional) sw_ver : str Software version of the P4 device (Optional) - pipeconf : str - P4 device table configuration (Optional) + p4bin : str + Path to P4 binary file (Optional, but must be combined with p4info) + p4info : str + Path to P4 info file (Optional, but must be combined with p4bin) + timeout : int + Device timeout in seconds (Optional) """ def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called - self.__client = None + self.__manager = None self.__address = address self.__port = int(port) + self.__endpoint = None self.__settings = settings - - try: - self.__dev_id = self.__settings.get(P4_ATTR_DEV_ID) - except Exception as ex: - LOGGER.error('P4 device ID is a mandatory setting') - raise Exception from ex - - if P4_ATTR_DEV_NAME in self.__settings: - self.__dev_name = self.__settings.get(P4_ATTR_DEV_NAME) - else: - self.__dev_name = str(self.__dev_id) - LOGGER.warning( - 'No device name is provided. Setting default name: %s', - self.__dev_name) - - if P4_ATTR_DEV_VENDOR in self.__settings: - self.__dev_vendor = self.__settings.get(P4_ATTR_DEV_VENDOR) - else: - self.__dev_vendor = P4_VAL_DEF_VENDOR - LOGGER.warning( - 'No vendor is provided. Setting default vendor: %s', - self.__dev_vendor) - - if P4_ATTR_DEV_HW_VER in self.__settings: - self.__dev_hw_version = self.__settings.get(P4_ATTR_DEV_HW_VER) - else: - self.__dev_hw_version = P4_VAL_DEF_HW_VER - LOGGER.warning( - 'No HW version is provided. Setting default HW version: %s', - self.__dev_hw_version) - - if P4_ATTR_DEV_SW_VER in self.__settings: - self.__dev_sw_version = self.__settings.get(P4_ATTR_DEV_SW_VER) - else: - self.__dev_sw_version = P4_VAL_DEF_SW_VER - LOGGER.warning( - 'No SW version is provided. Setting default SW version: %s', - self.__dev_sw_version) - - if P4_ATTR_DEV_PIPECONF in self.__settings: - self.__dev_pipeconf = self.__settings.get(P4_ATTR_DEV_PIPECONF) - else: - self.__dev_pipeconf = P4_VAL_DEF_PIPECONF - LOGGER.warning( - 'No P4 pipeconf is provided. 
Setting default P4 pipeconf: %s', - self.__dev_pipeconf) - + self.__id = None + self.__name = None + self.__vendor = P4_VAL_DEF_VENDOR + self.__hw_version = P4_VAL_DEF_HW_VER + self.__sw_version = P4_VAL_DEF_SW_VER + self.__p4bin_path = None + self.__p4info_path = None + self.__timeout = P4_VAL_DEF_TIMEOUT self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - LOGGER.info('Initializing P4 device at %s:%d with settings:', + self.__parse_and_validate_settings() + + LOGGER.info("Initializing P4 device at %s:%d with settings:", self.__address, self.__port) for key, value in settings.items(): - LOGGER.info('\t%8s = %s', key, value) + LOGGER.info("\t%8s = %s", key, value) def Connect(self) -> bool: """ - Establishes a connection between the P4 device driver and a P4 device. + Establish a connection between the P4 device driver and a P4 device. :return: boolean connection status. """ - LOGGER.info( - 'Connecting to P4 device %s:%d ...', - self.__address, self.__port) + LOGGER.info("Connecting to P4 device %s ...", self.__endpoint) with self.__lock: # Skip if already connected if self.__started.is_set(): return True - # Instantiate a gRPC channel with the P4 device - grpc_address = f'{self.__address}:{self.__port}' + # Dynamically devise an election ID election_id = (1, 0) - self.__client = P4RuntimeClient( - self.__dev_id, grpc_address, election_id) - LOGGER.info('\tConnected!') + + # Spawn a P4 manager for this device + self.__manager = P4Manager( + device_id=self.__id, + ip_address=self.__address, + port=self.__port, + election_id=election_id) + assert self.__manager + + # Start the P4 manager + try: + self.__manager.start(self.__p4bin_path, self.__p4info_path) + except Exception as ex: # pylint: disable=broad-except + raise Exception(ex) from ex + + LOGGER.info("\tConnected via P4Runtime version %s", + get_api_version()) self.__started.set() return True def Disconnect(self) -> bool: """ - Terminates the connection between the P4 device driver and a P4 device. + Terminate the connection between the P4 device driver and a P4 device. :return: boolean disconnection status. """ - LOGGER.info( - 'Disconnecting from P4 device %s:%d ...', - self.__address, self.__port) + LOGGER.info("Disconnecting from P4 device %s ...", self.__endpoint) # If not started, assume it is already disconnected if not self.__started.is_set(): return True - # gRPC client must already be instantiated - assert self.__client + # P4 manager must already be instantiated + assert self.__manager # Trigger termination of loops and processes self.__terminate.set() # Trigger connection tear down with the P4Runtime server - self.__client.tear_down() - self.__client = None + self.__manager.stop() + self.__manager = None - LOGGER.info('\tDisconnected!') + LOGGER.info("\tDisconnected!") return True def GetInitialConfig(self) -> List[Tuple[str, Any]]: """ - Retrieves the initial configuration of a P4 device. + Retrieve the initial configuration of a P4 device. :return: list of initial configuration items. """ - LOGGER.info('P4 GetInitialConfig()') - return [] + initial_conf = [] - def GetConfig(self, resource_keys : List[str] = [])\ + with self.__lock: + if not initial_conf: + LOGGER.warning("No initial configuration for P4 device %s ...", + self.__endpoint) + return [] + + def GetConfig(self, resource_keys: List[str] = [])\ -> List[Tuple[str, Union[Any, None, Exception]]]: """ - Retrieves the current configuration of a P4 device. + Retrieve the current configuration of a P4 device. 
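End to end, the driver is created with keyword settings matching the `P4_ATTR_*` keys, connected, queried and disconnected. A sketch (address, port, file paths and the import path are illustrative and assume `src/` is on the Python path):

```python
# Sketch: drive the P4 driver end to end. Values and import path are illustrative.
from device.service.drivers.p4.p4_driver import P4Driver

driver = P4Driver(
    address="192.168.1.10", port=9559,
    id=1,                               # mandatory device ID
    name="p4-sw1",
    vendor="Open Networking Foundation",
    hw_ver="BMv2 simple_switch",
    sw_ver="Stratum",
    p4bin="pipelines/main.json",        # must be provided together with p4info
    p4info="pipelines/main.p4info.txt",
    timeout=60)

assert driver.Connect()
config = driver.GetConfig()             # no keys -> dump all managed P4 objects
driver.Disconnect()
```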
- :param resource_keys: configuration parameters to retrieve. - :return: list of values associated with the requested resource keys. + :param resource_keys: P4 resource keys to retrieve. + :return: list of values associated with the requested resource keys or + None/Exception. """ + LOGGER.info( + "Getting configuration from P4 device %s ...", self.__endpoint) - LOGGER.info('P4 GetConfig()') - return [] + # No resource keys means fetch all configuration + if len(resource_keys) == 0: + LOGGER.warning( + "GetConfig with no resource keys " + "implies getting all resource keys!") + resource_keys = [ + obj_name for obj_name, _ in self.__manager.p4_objects.items() + ] + + # Verify the input type + chk_type("resources", resource_keys, list) + + with self.__lock: + return self.__get_resources(resource_keys) - def SetConfig(self, resources : List[Tuple[str, Any]])\ + def SetConfig(self, resources: List[Tuple[str, Any]])\ -> List[Union[bool, Exception]]: """ - Submits a new configuration to a P4 device. + Submit a new configuration to a P4 device. - :param resources: configuration parameters to set. - :return: list of results for resource key changes requested. + :param resources: P4 resources to set. + :return: list of boolean results or Exceptions for resource key + changes requested. """ - LOGGER.info('P4 SetConfig()') - return [] + LOGGER.info( + "Setting configuration to P4 device %s ...", self.__endpoint) - def DeleteConfig(self, resources : List[Tuple[str, Any]])\ + if not resources or len(resources) == 0: + LOGGER.warning( + "SetConfig requires a list of resources to store " + "into the device. Nothing is provided though.") + return [] + + assert isinstance(resources, list) + + with self.__lock: + return self.__set_resources(resources) + + def DeleteConfig(self, resources: List[Tuple[str, Any]])\ -> List[Union[bool, Exception]]: """ - Revokes P4 device configuration. + Revoke P4 device configuration. :param resources: list of tuples with resource keys to be deleted. - :return: list of results for resource key deletions requested. + :return: list of boolean results or Exceptions for resource key + deletions requested. """ - LOGGER.info('P4 DeleteConfig()') - return [] + LOGGER.info( + "Deleting configuration from P4 device %s ...", self.__endpoint) + + if not resources or len(resources) == 0: + LOGGER.warning( + "DeleteConfig requires a list of resources to delete " + "from the device. Nothing is provided though.") + return [] - def GetResource(self, endpoint_uuid : str) -> Optional[str]: + with self.__lock: + return self.__delete_resources(resources) + + def GetResource(self, endpoint_uuid: str) -> Optional[str]: """ - Retrieves a certain resource from a P4 device. + Retrieve a certain resource from a P4 device. :param endpoint_uuid: target endpoint UUID. :return: The path of the endpoint or None if not found. """ - LOGGER.info('P4 GetResource()') + LOGGER.warning("GetResource() RPC not yet implemented by the P4 driver") return "" - def GetState(self, blocking=False, terminate : Optional[threading.Event] = None) -> Iterator[Tuple[str, Any]]: + def GetState(self, + blocking=False, + terminate: Optional[threading.Event] = None) -> \ + Iterator[Tuple[str, Any]]: """ - Retrieves the state of a P4 device. + Retrieve the state of a P4 device. :param blocking: if non-blocking, the driver terminates the loop and returns. + :param terminate: termination flag. :return: sequences of state sample. 
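`SetConfig()` and `DeleteConfig()` take `(resource_key, value)` tuples where the value may be a JSON string; the default write operation is insert and deletions reuse the same payload. A sketch using the driver connected above (the resource-key constant comes from `p4_manager`; the JSON field names follow the `p4_common` parsers, while the exact schema accepted by `P4Manager` is an assumption here):

```python
# Sketch: insert and then delete a table entry through the driver above.
import json
from device.service.drivers.p4.p4_manager import KEY_TABLE

rule = json.dumps({
    "table-name": "IngressPipeImpl.l2_exact_table",
    "match-fields": [
        {"match-field": "hdr.ethernet.dst_addr",
         "match-value": "aa:bb:cc:dd:ee:ff"},
    ],
    "action-name": "IngressPipeImpl.set_egress_port",
    "action-params": [
        {"action-param": "port", "action-value": "1"},
    ],
})

print(driver.SetConfig([(KEY_TABLE, rule)]))      # [True] on success
print(driver.DeleteConfig([(KEY_TABLE, rule)]))   # [True] on success
```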
""" - LOGGER.info('P4 GetState()') + LOGGER.warning("GetState() RPC not yet implemented by the P4 driver") return [] - def SubscribeState(self, subscriptions : List[Tuple[str, float, float]])\ + def SubscribeState(self, subscriptions: List[Tuple[str, float, float]])\ -> List[Union[bool, Exception]]: """ - Subscribes to certain state information. + Subscribe to certain state information. :param subscriptions: list of tuples with resources to be subscribed. :return: list of results for resource subscriptions requested. """ - LOGGER.info('P4 SubscribeState()') - return [] + LOGGER.warning( + "SubscribeState() RPC not yet implemented by the P4 driver") + return [False for _ in subscriptions] - def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]])\ + def UnsubscribeState(self, subscriptions: List[Tuple[str, float, float]])\ -> List[Union[bool, Exception]]: """ - Unsubscribes from certain state information. + Unsubscribe from certain state information. :param subscriptions: list of tuples with resources to be unsubscribed. :return: list of results for resource un-subscriptions requested. """ - LOGGER.info('P4 UnsubscribeState()') - return [] + LOGGER.warning( + "UnsubscribeState() RPC not yet implemented by the P4 driver") + return [False for _ in subscriptions] + + def get_manager(self): + """ + Get an instance of the P4 manager. + + :return: P4 manager instance + """ + return self.__manager + + def __parse_and_validate_settings(self): + """ + Verify that the driver inputs comply to what is expected. + + :return: void or exception in case of validation error + """ + # Device endpoint information + assert matches_ipv4(self.__address) or (matches_ipv6(self.__address)),\ + f"{self.__address} not a valid IPv4 or IPv6 address" + assert valid_port(self.__port), \ + f"{self.__port} not a valid transport port" + self.__endpoint = f"{self.__address}:{self.__port}" + + # Device ID + try: + self.__id = self.__settings.get(P4_ATTR_DEV_ID) + except Exception as ex: + LOGGER.error("P4 device ID is a mandatory setting") + raise Exception from ex + + # Device name + if P4_ATTR_DEV_NAME in self.__settings: + self.__name = self.__settings.get(P4_ATTR_DEV_NAME) + else: + self.__name = str(self.__id) + LOGGER.warning( + "No device name is provided. Setting default name: %s", + self.__name) + + # Device vendor + if P4_ATTR_DEV_VENDOR in self.__settings: + self.__vendor = self.__settings.get(P4_ATTR_DEV_VENDOR) + else: + LOGGER.warning( + "No device vendor is provided. Setting default vendor: %s", + self.__vendor) + + # Device hardware version + if P4_ATTR_DEV_HW_VER in self.__settings: + self.__hw_version = self.__settings.get(P4_ATTR_DEV_HW_VER) + else: + LOGGER.warning( + "No HW version is provided. Setting default HW version: %s", + self.__hw_version) + + # Device software version + if P4_ATTR_DEV_SW_VER in self.__settings: + self.__sw_version = self.__settings.get(P4_ATTR_DEV_SW_VER) + else: + LOGGER.warning( + "No SW version is provided. 
Setting default SW version: %s", + self.__sw_version) + + # Path to P4 binary file + if P4_ATTR_DEV_P4BIN in self.__settings: + self.__p4bin_path = self.__settings.get(P4_ATTR_DEV_P4BIN) + assert os.path.exists(self.__p4bin_path),\ + "Invalid path to p4bin file" + assert P4_ATTR_DEV_P4INFO in self.__settings,\ + "p4info and p4bin settings must be provided together" + + # Path to P4 info file + if P4_ATTR_DEV_P4INFO in self.__settings: + self.__p4info_path = self.__settings.get(P4_ATTR_DEV_P4INFO) + assert os.path.exists(self.__p4info_path),\ + "Invalid path to p4info file" + assert P4_ATTR_DEV_P4BIN in self.__settings,\ + "p4info and p4bin settings must be provided together" + + if (not self.__p4bin_path) or (not self.__p4info_path): + LOGGER.warning( + "No P4 binary and info files are provided, hence " + "no pipeline will be installed on the whitebox device.\n" + "This driver will attempt to manage whatever pipeline " + "is available on the target device.") + + # Device timeout + if P4_ATTR_DEV_TIMEOUT in self.__settings: + self.__timeout = self.__settings.get(P4_ATTR_DEV_TIMEOUT) + assert self.__timeout > 0,\ + "Device timeout must be a positive integer" + else: + LOGGER.warning( + "No device timeout is provided. Setting default timeout: %s", + self.__timeout) + + def __get_resources(self, resource_keys): + """ + Retrieve the current configuration of a P4 device. + + :param resource_keys: P4 resource keys to retrieve. + :return: list of values associated with the requested resource keys or + None/Exception. + """ + resources = [] + + LOGGER.debug("GetConfig() -> Keys: %s", resource_keys) + + for resource_key in resource_keys: + entries = [] + try: + if KEY_TABLE == resource_key: + for table_name in self.__manager.get_table_names(): + t_entries = self.__manager.table_entries_to_json( + table_name) + if t_entries: + entries.append(t_entries) + elif KEY_COUNTER == resource_key: + for cnt_name in self.__manager.get_counter_names(): + c_entries = self.__manager.counter_entries_to_json( + cnt_name) + if c_entries: + entries.append(c_entries) + elif KEY_DIR_COUNTER == resource_key: + for d_cnt_name in self.__manager.get_direct_counter_names(): + dc_entries = \ + self.__manager.direct_counter_entries_to_json( + d_cnt_name) + if dc_entries: + entries.append(dc_entries) + elif KEY_METER == resource_key: + for meter_name in self.__manager.get_meter_names(): + m_entries = self.__manager.meter_entries_to_json( + meter_name) + if m_entries: + entries.append(m_entries) + elif KEY_DIR_METER == resource_key: + for d_meter_name in self.__manager.get_direct_meter_names(): + dm_entries = \ + self.__manager.direct_meter_entries_to_json( + d_meter_name) + if dm_entries: + entries.append(dm_entries) + elif KEY_ACTION_PROFILE == resource_key: + for ap_name in self.__manager.get_action_profile_names(): + ap_entries = \ + self.__manager.action_prof_member_entries_to_json( + ap_name) + if ap_entries: + entries.append(ap_entries) + elif KEY_CTL_PKT_METADATA == resource_key: + msg = f"{resource_key.capitalize()} is not a " \ + f"retrievable resource" + raise Exception(msg) + else: + msg = f"GetConfig failed due to invalid " \ + f"resource key: {resource_key}" + raise Exception(msg) + resources.append( + (resource_key, entries if entries else None) + ) + except Exception as ex: # pylint: disable=broad-except + resources.append((resource_key, ex)) + + return resources + + def __set_resources(self, resources): + """ + Submit a new configuration to a P4 device. + + :param resources: P4 resources to set. 
+ :return: list of boolean results or Exceptions for resource key + changes requested. + """ + results = [] + + for i, resource in enumerate(resources): + str_resource_name = f"resources[#{i}]" + resource_key = "" + try: + chk_type( + str_resource_name, resource, (list, tuple)) + chk_length( + str_resource_name, resource, min_length=2, max_length=2) + resource_key, resource_value = resource + chk_string( + str_resource_name, resource_key, allow_empty=False) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception( + "Exception validating %s: %s", + str_resource_name, str(resource_key)) + results.append(e) # store the exception if validation fails + continue + + try: + resource_value = json.loads(resource_value) + except Exception: # pylint: disable=broad-except + pass + + LOGGER.debug( + "SetConfig() -> Key: %s - Value: %s", + resource_key, resource_value) + + # Default operation is insert. + # P4 manager has internal logic to judge whether an entry + # to be inserted already exists, thus simply needs an update. + operation = WriteOperation.insert + + try: + self.__apply_operation(resource_key, resource_value, operation) + results.append(True) + except Exception as ex: # pylint: disable=broad-except + results.append(ex) + + print(results) + + return results + + def __delete_resources(self, resources): + """ + Revoke P4 device configuration. + + :param resources: list of tuples with resource keys to be deleted. + :return: list of boolean results or Exceptions for resource key + deletions requested. + """ + results = [] + + for i, resource in enumerate(resources): + str_resource_name = f"resources[#{i}]" + resource_key = "" + try: + chk_type( + str_resource_name, resource, (list, tuple)) + chk_length( + str_resource_name, resource, min_length=2, max_length=2) + resource_key, resource_value = resource + chk_string( + str_resource_name, resource_key, allow_empty=False) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception( + "Exception validating %s: %s", + str_resource_name, str(resource_key)) + results.append(e) # store the exception if validation fails + continue + + try: + resource_value = json.loads(resource_value) + except Exception: # pylint: disable=broad-except + pass + + LOGGER.debug("DeleteConfig() -> Key: %s - Value: %s", + resource_key, resource_value) + + operation = WriteOperation.delete + + try: + self.__apply_operation(resource_key, resource_value, operation) + results.append(True) + except Exception as ex: # pylint: disable=broad-except + results.append(ex) + + print(results) + + return results + + def __apply_operation( + self, resource_key, resource_value, operation: WriteOperation): + """ + Apply a write operation to a P4 resource. 
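+        The resource key selects the P4 manager routine used below; e.g.,
+        "table" entries are forwarded to table_entry_operation_from_json(),
+        "counter" entries to counter_entry_operation_from_json(), and so on.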
+ + :param resource_key: P4 resource key + :param resource_value: P4 resource value in JSON format + :param operation: write operation (i.e., insert, update, delete) + to apply + :return: True if operation is successfully applied or raise Exception + """ + + # Apply settings to the various tables + if KEY_TABLE == resource_key: + self.__manager.table_entry_operation_from_json( + resource_value, operation) + elif KEY_COUNTER == resource_key: + self.__manager.counter_entry_operation_from_json( + resource_value, operation) + elif KEY_DIR_COUNTER == resource_key: + self.__manager.direct_counter_entry_operation_from_json( + resource_value, operation) + elif KEY_METER == resource_key: + self.__manager.meter_entry_operation_from_json( + resource_value, operation) + elif KEY_DIR_METER == resource_key: + self.__manager.direct_meter_entry_operation_from_json( + resource_value, operation) + elif KEY_ACTION_PROFILE == resource_key: + self.__manager.action_prof_member_entry_operation_from_json( + resource_value, operation) + self.__manager.action_prof_group_entry_operation_from_json( + resource_value, operation) + elif KEY_CTL_PKT_METADATA == resource_key: + msg = f"{resource_key.capitalize()} is not a " \ + f"configurable resource" + raise Exception(msg) + else: + msg = f"{operation} on invalid key {resource_key}" + LOGGER.error(msg) + raise Exception(msg) + + LOGGER.debug("%s operation: %s", resource_key.capitalize(), operation) + + return True diff --git a/src/device/service/drivers/p4/p4_exception.py b/src/device/service/drivers/p4/p4_exception.py new file mode 100644 index 0000000000000000000000000000000000000000..3e3afb723b3850fd9a9b2b1c4982bf8ae31b20f7 --- /dev/null +++ b/src/device/service/drivers/p4/p4_exception.py @@ -0,0 +1,135 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4 driver exceptions. +""" + + +class UserError(Exception): + """ + User error exception. + """ + def __init__(self, info=""): + super().__init__() + self.info = info + + def __str__(self): + return self.info + + # TODO: find better way to get a custom traceback # pylint: disable=W0511 + def _render_traceback_(self): + return [str(self)] + + +class InvalidP4InfoError(Exception): + """ + Invalid P4 info exception. + """ + def __init__(self, info=""): + super().__init__() + self.info = info + + def __str__(self): + return f"Invalid P4Info message: {self.info}" + + def _render_traceback_(self): + return [str(self)] + + +class UnknownOptionName(UserError): + """ + Unknown option name exception. + """ + def __init__(self, option_name): + super().__init__() + self.option_name = option_name + + def __str__(self): + return f"Unknown option name: {self.option_name}" + + +class InvalidOptionValueType(UserError): + """ + Invalid option value type exception. + """ + def __init__(self, option, value): + super().__init__() + self.option = option + self.value = value + + def __str__(self): + return f"Invalid value type for option {self.option.name}. 
"\ + "Expected {self.option.value.__name__} but got "\ + "value {self.value} with type {type(self.value).__name__}" + + +class UserBadIPv4Error(UserError): + """ + Invalid IPv4 address value exception. + """ + def __init__(self, addr): + super().__init__() + self.addr = addr + + def __str__(self): + return f"{self.addr}' is not a valid IPv4 address" + + def _render_traceback_(self): + return [str(self)] + + +class UserBadIPv6Error(UserError): + """ + Invalid IPv6 address value exception. + """ + def __init__(self, addr): + super().__init__() + self.addr = addr + + def __str__(self): + return f"'{self.addr}' is not a valid IPv6 address" + + def _render_traceback_(self): + return [str(self)] + + +class UserBadMacError(UserError): + """ + Invalid MAC address value exception. + """ + def __init__(self, addr): + super().__init__() + self.addr = addr + + def __str__(self): + return f"'{self.addr}' is not a valid MAC address" + + def _render_traceback_(self): + return [str(self)] + + +class UserBadValueError(UserError): + """ + Invalid value exception. + """ + def __init__(self, info=""): + super().__init__() + self.info = info + + def __str__(self): + return self.info + + def _render_traceback_(self): + return [str(self)] diff --git a/src/device/service/drivers/p4/p4_global_options.py b/src/device/service/drivers/p4/p4_global_options.py new file mode 100644 index 0000000000000000000000000000000000000000..86043b671e9316dfeff2fb12db8ab3088386382a --- /dev/null +++ b/src/device/service/drivers/p4/p4_global_options.py @@ -0,0 +1,204 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4Runtime global options. +""" + +import enum +try: + from .p4_exception import UnknownOptionName, InvalidOptionValueType +except ImportError: + from p4_exception import UnknownOptionName, InvalidOptionValueType + + +@enum.unique +class Options(enum.Enum): + """ + P4 options. + """ + canonical_bytestrings = bool + + +class GlobalOptions: + """ + P4 global options. + """ + option_defaults = { + Options.canonical_bytestrings: True, + } + + option_helpstrings = { + Options.canonical_bytestrings: """ +Use byte-padded legacy format for binary strings sent to the P4Runtime server, +instead of the canonical representation. See P4Runtime specification for details. +""" + } + + def __init__(self): + self._values = {} + self.reset() + self._option_names = [option.name for option in Options] + self._set_docstring() + + def reset(self): + """ + Reset all options to their defaults. + + :return: void + """ + for option in Options: + assert option in GlobalOptions.option_defaults + self._values[option] = GlobalOptions.option_defaults[option] + + def _supported_options_as_str(self): + """ + Return a comma-separated string of supported options. 
+ + :return: string of supported options + """ + return ", ".join([f"{o.name} ({o.value.__name__})" for o in Options]) + + def _supported_options_as_str_verbose(self): + """ + Return a detailed comma-separated string of supported options. + + :return: string of supported options + """ + opt_str = "" + for option in Options: + opt_str += f"Option name: {option.name}\n" + opt_str += f"Type: {option.value.__name__}\n" + opt_str += f"Default value: " \ + f"{GlobalOptions.option_defaults[option]}\n" + opt_str += f"Description: " \ + f"{GlobalOptions.option_helpstrings.get(option, 'N/A')}\n" + opt_str += "\n" + return opt_str[:-1] + + def _set_docstring(self): + """ + Set the documentation for this object. + + :return: void + """ + self.__doc__ = f""" +Manage global options for the P4Runtime shell. +Supported options are: {self._supported_options_as_str()} +To set the value of a global option, use GLOBAL_OPTIONS["<option name>"] = <option value> +To access the current value of a global option, use GLOBAL_OPTIONS.["<option name>"] +To reset all options to their default value, use GLOBAL_OPTIONS.reset + +{self._supported_options_as_str_verbose()} +""" + + def __dir__(self): + """ + Return all names in this scope. + + :return: list of names in scope + """ + return ["reset", "set", "get"] + + def set_option(self, option, value): + """ + Set an option's value. + + :param option: option to set + :param value: option value + :return: void + """ + self._values[option] = value + + def get_option(self, option): + """ + Get an option's value. + + :param option: option to get + :return: option value + """ + return self._values[option] + + def set(self, name, value): + """ + Create an option and set its value. + + :param name: option name + :param value: option value + :return: void + """ + try: + option = Options[name] + except KeyError as ex: + raise UnknownOptionName(name) from ex + if not isinstance(value, option.value): + raise InvalidOptionValueType(option, value) + self.set_option(option, value) + + def get(self, name): + """ + Get option by name. + + :param name: option name + :return: option + """ + try: + option = Options[name] + except KeyError as ex: + raise UnknownOptionName(name) from ex + return self.get_option(option) + + def __setitem__(self, name, value): + self.set(name, value) + + def __getitem__(self, name): + return self.get(name) + + def __str__(self): + return '\n'.join([f"{o.name}: {v}" for o, v in self._values.items()]) + + +GLOBAL_OPTIONS = GlobalOptions() + + +def to_canonical_bytes(bytes_): + """ + Convert to canonical bytes. + + :param bytes_: byte stream + :return: canonical bytes + """ + if len(bytes_) == 0: + return bytes_ + num_zeros = 0 + for byte in bytes_: + if byte != 0: + break + num_zeros += 1 + if num_zeros == len(bytes_): + return bytes_[:1] + return bytes_[num_zeros:] + + +def make_canonical_if_option_set(bytes_): + """ + Convert to canonical bytes if option is set. 
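+
+    Illustrative behaviour, assuming the canonical_bytestrings option is
+    left at its default value (True):
+
+        make_canonical_if_option_set(b"\x00\x00\x0a")  # -> b"\x0a"
+        make_canonical_if_option_set(b"\x00\x00")      # -> b"\x00"
+        make_canonical_if_option_set(b"\x2a")          # -> b"\x2a"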
+ + :param bytes_: byte stream + :return: canonical bytes + """ + + if GLOBAL_OPTIONS.get_option(Options.canonical_bytestrings): + return to_canonical_bytes(bytes_) + return bytes_ diff --git a/src/device/service/drivers/p4/p4_manager.py b/src/device/service/drivers/p4/p4_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..dc25e80b5803bfdec7d802d41c136865f4c045e3 --- /dev/null +++ b/src/device/service/drivers/p4/p4_manager.py @@ -0,0 +1,5987 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4Runtime manager. +""" + +import enum +import os +import queue +import time +import logging +from collections import Counter, OrderedDict +from threading import Thread +from tabulate import tabulate +from p4.v1 import p4runtime_pb2 +from p4.config.v1 import p4info_pb2 + +try: + from .p4_client import P4RuntimeClient, P4RuntimeException,\ + P4RuntimeWriteException, WriteOperation, parse_p4runtime_error + from .p4_context import P4RuntimeEntity, P4Type, Context + from .p4_global_options import make_canonical_if_option_set + from .p4_common import encode,\ + parse_resource_string_from_json, parse_resource_integer_from_json,\ + parse_resource_bytes_from_json, parse_match_operations_from_json,\ + parse_action_parameters_from_json, parse_integer_list_from_json + from .p4_exception import UserError, InvalidP4InfoError +except ImportError: + from p4_client import P4RuntimeClient, P4RuntimeException,\ + P4RuntimeWriteException, WriteOperation, parse_p4runtime_error + from p4_context import P4RuntimeEntity, P4Type, Context + from p4_global_options import make_canonical_if_option_set + from p4_common import encode,\ + parse_resource_string_from_json, parse_resource_integer_from_json,\ + parse_resource_bytes_from_json, parse_match_operations_from_json,\ + parse_action_parameters_from_json, parse_integer_list_from_json + from p4_exception import UserError, InvalidP4InfoError + +# Logger instance +LOGGER = logging.getLogger(__name__) + +# Global P4Runtime context +CONTEXT = Context() + +# Global P4Runtime client +CLIENT = None + +# Constant P4 entities +KEY_TABLE = "table" +KEY_ACTION = "action" +KEY_ACTION_PROFILE = "action_profile" +KEY_COUNTER = "counter" +KEY_DIR_COUNTER = "direct_counter" +KEY_METER = "meter" +KEY_DIR_METER = "direct_meter" +KEY_CTL_PKT_METADATA = "controller_packet_metadata" + + +def get_context(): + """ + Return P4 context. + + :return: context object + """ + return CONTEXT + + +def get_client(): + """ + Return P4 client. + + :return: P4Runtime client object + """ + return CLIENT + + +def get_api_version(): + """ + Get the supported P4Runtime API version. + + :return: API version + """ + return CLIENT.api_version() + + +def get_table_type(table): + """ + Assess the type of P4 table based upon the matching scheme. 
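+
+    Note: the type is inferred from the match kind of the table's first
+    match field (exact, LPM, ternary, range or optional); a table without
+    match fields yields None.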
+ + :param table: P4 table + :return: P4 table type + """ + for m_f in table.match_fields: + if m_f.match_type == p4info_pb2.MatchField.EXACT: + return p4info_pb2.MatchField.EXACT + if m_f.match_type == p4info_pb2.MatchField.LPM: + return p4info_pb2.MatchField.LPM + if m_f.match_type == p4info_pb2.MatchField.TERNARY: + return p4info_pb2.MatchField.TERNARY + if m_f.match_type == p4info_pb2.MatchField.RANGE: + return p4info_pb2.MatchField.RANGE + if m_f.match_type == p4info_pb2.MatchField.OPTIONAL: + return p4info_pb2.MatchField.OPTIONAL + return None + + +def match_type_to_str(match_type): + """ + Convert table match type to string. + + :param match_type: table match type object + :return: table match type string + """ + if match_type == p4info_pb2.MatchField.EXACT: + return "Exact" + if match_type == p4info_pb2.MatchField.LPM: + return "LPM" + if match_type == p4info_pb2.MatchField.TERNARY: + return "Ternary" + if match_type == p4info_pb2.MatchField.RANGE: + return "Range" + if match_type == p4info_pb2.MatchField.OPTIONAL: + return "Optional" + return None + + +def insert_table_entry_exact( + table_name, match_map, action_name, action_params, metadata, + cnt_pkt=-1, cnt_byte=-1): + """ + Insert an entry into an exact match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + table_entry = TableEntry(table_name)(action=action_name) + + for match_k, match_v in match_map.items(): + table_entry.match[match_k] = match_v + + for action_k, action_v in action_params.items(): + table_entry.action[action_k] = action_v + + if metadata: + table_entry.metadata = metadata + + if cnt_pkt > 0: + table_entry.counter_data.packet_count = cnt_pkt + + if cnt_byte > 0: + table_entry.counter_data.byte_count = cnt_byte + + ex_msg = "" + try: + table_entry.insert() + LOGGER.info("Inserted exact table entry: %s", table_entry) + except P4RuntimeWriteException as ex: + ex_msg = str(ex) + except P4RuntimeException as ex: + raise P4RuntimeException from ex + + # Table entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + table_entry.modify() + LOGGER.info("Updated exact table entry: %s", table_entry) + + return table_entry + + +def insert_table_entry_ternary( + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): + """ + Insert an entry into a ternary match table. 
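+
+    A minimal sketch of a call, assuming the p4runtime-shell style
+    "value&&&mask" notation for ternary fields; table, field and action
+    names below are placeholders, not part of this driver:
+
+        insert_table_entry_ternary(
+            table_name="ingress.acl",
+            match_map={"hdr.ipv4.dst_addr": "10.0.0.0&&&255.0.0.0"},
+            action_name="ingress.drop",
+            action_params={},
+            metadata=None,
+            priority=10)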
+ + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + table_entry = TableEntry(table_name)(action=action_name) + + for match_k, match_v in match_map.items(): + table_entry.match[match_k] = match_v + + for action_k, action_v in action_params.items(): + table_entry.action[action_k] = action_v + + table_entry.priority = priority + + if metadata: + table_entry.metadata = metadata + + if cnt_pkt > 0: + table_entry.counter_data.packet_count = cnt_pkt + + if cnt_byte > 0: + table_entry.counter_data.byte_count = cnt_byte + + ex_msg = "" + try: + table_entry.insert() + LOGGER.info("Inserted ternary table entry: %s", table_entry) + except P4RuntimeWriteException as ex: + ex_msg = str(ex) + except P4RuntimeException as ex: + raise P4RuntimeException from ex + + # Table entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + table_entry.modify() + LOGGER.info("Updated ternary table entry: %s", table_entry) + + return table_entry + + +def insert_table_entry_range( + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument + """ + Insert an entry into a range match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + raise NotImplementedError( + "Range-based table insertion not implemented yet") + + +def insert_table_entry_optional( + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument + """ + Insert an entry into an optional match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + raise NotImplementedError( + "Optional-based table insertion not implemented yet") + + +class P4Manager: + """ + Class to manage the runtime entries of a P4 pipeline. 
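+
+    A minimal usage sketch (address, port, election ID and pipeline file
+    paths below are placeholders):
+
+        manager = P4Manager(device_id=1, ip_address="192.168.1.10",
+                            port=50001, election_id=(1, 0))
+        manager.start("/path/to/pipeline.json", "/path/to/p4info.txt")
+        LOGGER.info("Discovered tables: %s", manager.get_table_names())
+        manager.stop()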
+ """ + + def __init__(self, device_id: int, ip_address: str, port: int, + election_id: tuple, role_name=None, ssl_options=None): + global CLIENT + + self.__id = device_id + self.__ip_address = ip_address + self.__port = int(port) + self.__endpoint = f"{self.__ip_address}:{self.__port}" + CLIENT = P4RuntimeClient( + self.__id, self.__endpoint, election_id, role_name, ssl_options) + self.__p4info = None + + # Internal memory for whitebox management + # | -> P4 entities + self.p4_objects = {} + + # | -> P4 entities + self.table_entries = {} + self.counter_entries = {} + self.direct_counter_entries = {} + self.meter_entries = {} + self.direct_meter_entries = {} + self.multicast_groups = {} + self.clone_session_entries = {} + self.action_profile_members = {} + self.action_profile_groups = {} + + def start(self, p4bin_path, p4info_path): + """ + Start the P4 manager. This involves: + (i) setting the forwarding pipeline of the target switch, + (ii) creating a P4 context object, + (iii) Discovering all the entities of the pipeline, and + (iv) initializing necessary data structures of the manager + + :param p4bin_path: Path to the P4 binary file + :param p4info_path: Path to the P4 info file + :return: void + """ + + if not p4bin_path or not os.path.exists(p4bin_path): + LOGGER.warning("P4 binary file not found") + + if not p4info_path or not os.path.exists(p4info_path): + LOGGER.warning("P4 info file not found") + + # Forwarding pipeline is only set iff both files are present + if p4bin_path and p4info_path: + try: + CLIENT.set_fwd_pipe_config(p4info_path, p4bin_path) + except FileNotFoundError as ex: + LOGGER.critical(ex) + CLIENT.tear_down() + raise FileNotFoundError(ex) from ex + except P4RuntimeException as ex: + LOGGER.critical("Error when setting config") + LOGGER.critical(ex) + CLIENT.tear_down() + raise P4RuntimeException(ex) from ex + except Exception as ex: # pylint: disable=broad-except + LOGGER.critical("Error when setting config") + CLIENT.tear_down() + raise Exception(ex) from ex + + try: + self.__p4info = CLIENT.get_p4info() + except P4RuntimeException as ex: + LOGGER.critical("Error when retrieving P4Info") + LOGGER.critical(ex) + CLIENT.tear_down() + raise P4RuntimeException(ex) from ex + + CONTEXT.set_p4info(self.__p4info) + self.__discover_objects() + self.__init_objects() + LOGGER.info("P4Runtime manager started") + + def stop(self): + """ + Stop the P4 manager. This involves: + (i) tearing the P4Runtime client down and + (ii) cleaning up the manager's internal memory + + :return: void + """ + global CLIENT + + # gRPC client must already be instantiated + assert CLIENT + + # Trigger connection tear down with the P4Runtime server + CLIENT.tear_down() + CLIENT = None + self.__clear() + LOGGER.info("P4Runtime manager stopped") + + def __clear(self): + """ + Reset basic members of the P4 manager. + + :return: void + """ + self.__id = None + self.__ip_address = None + self.__port = None + self.__endpoint = None + self.__clear_state() + + def __clear_state(self): + """ + Reset the manager's internal memory. 
+ + :return: void + """ + self.table_entries.clear() + self.counter_entries.clear() + self.direct_counter_entries.clear() + self.meter_entries.clear() + self.direct_meter_entries.clear() + self.multicast_groups.clear() + self.clone_session_entries.clear() + self.action_profile_members.clear() + self.action_profile_groups.clear() + self.p4_objects.clear() + + def __init_objects(self): + """ + Parse the discovered P4 objects and initialize internal memory for all + the underlying P4 entities. + + :return: void + """ + global KEY_TABLE, KEY_ACTION, KEY_ACTION_PROFILE, \ + KEY_COUNTER, KEY_DIR_COUNTER, \ + KEY_METER, KEY_DIR_METER, \ + KEY_CTL_PKT_METADATA + + KEY_TABLE = P4Type.table.name + KEY_ACTION = P4Type.action.name + KEY_ACTION_PROFILE = P4Type.action_profile.name + KEY_COUNTER = P4Type.counter.name + KEY_DIR_COUNTER = P4Type.direct_counter.name + KEY_METER = P4Type.meter.name + KEY_DIR_METER = P4Type.direct_meter.name + KEY_CTL_PKT_METADATA = P4Type.controller_packet_metadata.name + assert (k for k in [ + KEY_TABLE, KEY_ACTION, KEY_ACTION_PROFILE, + KEY_COUNTER, KEY_DIR_COUNTER, + KEY_METER, KEY_DIR_METER, + KEY_CTL_PKT_METADATA + ]) + + if not self.p4_objects: + LOGGER.warning( + "Cannot initialize internal memory without discovering " + "the pipeline\'s P4 objects") + return + + # Initialize all sorts of entries + if KEY_TABLE in self.p4_objects: + for table in self.p4_objects[KEY_TABLE]: + self.table_entries[table.name] = [] + + if KEY_COUNTER in self.p4_objects: + for cnt in self.p4_objects[KEY_COUNTER]: + self.counter_entries[cnt.name] = [] + + if KEY_DIR_COUNTER in self.p4_objects: + for d_cnt in self.p4_objects[KEY_DIR_COUNTER]: + self.direct_counter_entries[d_cnt.name] = [] + + if KEY_METER in self.p4_objects: + for meter in self.p4_objects[KEY_METER]: + self.meter_entries[meter.name] = [] + + if KEY_DIR_METER in self.p4_objects: + for d_meter in self.p4_objects[KEY_DIR_METER]: + self.direct_meter_entries[d_meter.name] = [] + + if KEY_ACTION_PROFILE in self.p4_objects: + for act_prof in self.p4_objects[KEY_ACTION_PROFILE]: + self.action_profile_members[act_prof.name] = [] + self.action_profile_groups[act_prof.name] = [] + + def __discover_objects(self): + """ + Discover and store all P4 objects. + + :return: void + """ + self.__clear_state() + + for obj_type in P4Type: + for obj in P4Objects(obj_type): + if obj_type.name not in self.p4_objects: + self.p4_objects[obj_type.name] = [] + self.p4_objects[obj_type.name].append(obj) + + def get_table(self, table_name): + """ + Get a P4 table by name. + + :param table_name: P4 table name + :return: P4 table object + """ + if KEY_TABLE not in self.p4_objects: + return None + for table in self.p4_objects[KEY_TABLE]: + if table.name == table_name: + return table + return None + + def get_tables(self): + """ + Get a list of all P4 tables. + + :return: list of P4 tables or empty list + """ + if KEY_TABLE not in self.p4_objects: + return [] + return self.p4_objects[KEY_TABLE] + + def get_action(self, action_name): + """ + Get action by name. + + :param action_name: name of a P4 action + :return: action object or None + """ + if KEY_ACTION not in self.p4_objects: + return None + for action in self.p4_objects[KEY_ACTION]: + if action.name == action_name: + return action + return None + + def get_actions(self): + """ + Get a list of all P4 actions. 
+ + :return: list of P4 actions or empty list + """ + if KEY_ACTION not in self.p4_objects: + return [] + return self.p4_objects[KEY_ACTION] + + def get_action_profile(self, action_prof_name): + """ + Get action profile by name. + + :param action_prof_name: name of the action profile + :return: action profile object or None + """ + if KEY_ACTION_PROFILE not in self.p4_objects: + return None + for action_prof in self.p4_objects[KEY_ACTION_PROFILE]: + if action_prof.name == action_prof_name: + return action_prof + return None + + def get_action_profiles(self): + """ + Get a list of all P4 action profiles. + + :return: list of P4 action profiles or empty list + """ + if KEY_ACTION_PROFILE not in self.p4_objects: + return [] + return self.p4_objects[KEY_ACTION_PROFILE] + + def get_counter(self, cnt_name): + """ + Get counter by name. + + :param cnt_name: name of a P4 counter + :return: counter object or None + """ + if KEY_COUNTER not in self.p4_objects: + return None + for cnt in self.p4_objects[KEY_COUNTER]: + if cnt.name == cnt_name: + return cnt + return None + + def get_counters(self): + """ + Get a list of all P4 counters. + + :return: list of P4 counters or empty list + """ + if KEY_COUNTER not in self.p4_objects: + return [] + return self.p4_objects[KEY_COUNTER] + + def get_direct_counter(self, dir_cnt_name): + """ + Get direct counter by name. + + :param dir_cnt_name: name of a direct P4 counter + :return: direct counter object or None + """ + if KEY_DIR_COUNTER not in self.p4_objects: + return None + for d_cnt in self.p4_objects[KEY_DIR_COUNTER]: + if d_cnt.name == dir_cnt_name: + return d_cnt + return None + + def get_direct_counters(self): + """ + Get a list of all direct P4 counters. + + :return: list of direct P4 counters or empty list + """ + if KEY_DIR_COUNTER not in self.p4_objects: + return [] + return self.p4_objects[KEY_DIR_COUNTER] + + def get_meter(self, meter_name): + """ + Get meter by name. + + :param meter_name: name of a P4 meter + :return: meter object or None + """ + if KEY_METER not in self.p4_objects: + return None + for meter in self.p4_objects[KEY_METER]: + if meter.name == meter_name: + return meter + return None + + def get_meters(self): + """ + Get a list of all P4 meters. + + :return: list of P4 meters or empty list + """ + if KEY_METER not in self.p4_objects: + return [] + return self.p4_objects[KEY_METER] + + def get_direct_meter(self, dir_meter_name): + """ + Get direct meter by name. + + :param dir_meter_name: name of a direct P4 meter + :return: direct meter object or None + """ + if KEY_DIR_METER not in self.p4_objects: + return None + for d_meter in self.p4_objects[KEY_DIR_METER]: + if d_meter.name == dir_meter_name: + return d_meter + return None + + def get_direct_meters(self): + """ + Get a list of all direct P4 meters. + + :return: list of direct P4 meters or empty list + """ + if KEY_DIR_METER not in self.p4_objects: + return [] + return self.p4_objects[KEY_DIR_METER] + + def get_ctl_pkt_metadata(self, ctl_pkt_meta_name): + """ + Get a packet replication object by name. + + :param ctl_pkt_meta_name: name of a P4 packet replication object + :return: P4 packet replication object or None + """ + if KEY_CTL_PKT_METADATA not in self.p4_objects: + return None + for pkt_meta in self.p4_objects[KEY_CTL_PKT_METADATA]: + if ctl_pkt_meta_name == pkt_meta.name: + return pkt_meta + return None + + def get_resource_keys(self): + """ + Retrieve the available P4 resource keys. 
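+
+        For a pipeline that defines every supported entity type this is
+        ["table", "action", "action_profile", "counter", "direct_counter",
+        "meter", "direct_meter", "controller_packet_metadata"]; only the
+        entity types actually discovered in the pipeline are returned.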
+ + :return: list of P4 resource keys + """ + return list(self.p4_objects.keys()) + + def count_active_entries(self): + """ + Count the number of active entries across all supported P4 entities. + + :return: active number of entries + """ + tot_cnt = \ + self.count_table_entries_all() + \ + self.count_counter_entries_all() + \ + self.count_direct_counter_entries_all() + \ + self.count_meter_entries_all() + \ + self.count_direct_meter_entries_all() + \ + self.count_action_prof_member_entries_all() + \ + self.count_action_prof_group_entries_all() + + return tot_cnt + + ############################################################################ + # Table methods + ############################################################################ + def get_table_names(self): + """ + Retrieve a list of P4 table names. + + :return: list of P4 table names + """ + if KEY_TABLE not in self.p4_objects: + return [] + return list(table.name for table in self.p4_objects[KEY_TABLE]) + + def get_table_entries(self, table_name, action_name=None): + """ + Get a list of P4 table entries by table name and optionally by action. + + :param table_name: name of a P4 table + :param action_name: action name + :return: list of P4 table entries or None + """ + if table_name not in self.table_entries: + return None + self.table_entries[table_name].clear() + self.table_entries[table_name] = [] + + try: + for count, table_entry in enumerate( + TableEntry(table_name)(action=action_name).read()): + LOGGER.debug( + "Table %s - Entry %d\n%s", table_name, count, table_entry) + self.table_entries[table_name].append(table_entry) + return self.table_entries[table_name] + except P4RuntimeException as ex: + LOGGER.error(ex) + return [] + + def table_entries_to_json(self, table_name): + """ + Encode all entries of a P4 table into a JSON object. + + :param table_name: name of a P4 table + :return: JSON object with table entries + """ + if (KEY_TABLE not in self.p4_objects) or \ + not self.p4_objects[KEY_TABLE]: + LOGGER.warning("No table entries to retrieve\n") + return {} + + table_res = {} + + for table in self.p4_objects[KEY_TABLE]: + if not table.name == table_name: + continue + + entries = self.get_table_entries(table.name) + if len(entries) == 0: + continue + + table_res["table-name"] = table_name + + for ent in entries: + entry_match_field = "\n".join(ent.match.fields()) + entry_match_type = match_type_to_str( + ent.match.match_type(entry_match_field)) + + table_res["id"] = ent.id + table_res["match-fields"] = [] + for match_field in ent.match.fields(): + table_res["match-fields"].append( + { + "match-field": match_field, + "match-value": ent.match.value(match_field), + "match-type": entry_match_type + } + ) + table_res["actions"] = [] + table_res["actions"].append( + { + "action-id": ent.action.id(), + "action": ent.action.alias() + } + ) + table_res["priority"] = ent.priority + table_res["is-default"] = ent.is_default + table_res["idle-timeout"] = ent.idle_timeout_ns + if ent.metadata: + table_res["metadata"] = ent.metadata + + return table_res + + def count_table_entries(self, table_name, action_name=None): + """ + Count the number of entries in a P4 table. + + :param table_name: name of a P4 table + :param action_name: action name + :return: number of P4 table entries or negative integer + upon missing table + """ + entries = self.get_table_entries(table_name, action_name) + if entries is None: + return -1 + return len(entries) + + def count_table_entries_all(self): + """ + Count all entries in a P4 table. 
+ + :return: number of P4 table entries + """ + total_cnt = 0 + for table_name in self.get_table_names(): + cnt = self.count_table_entries(table_name) + if cnt < 0: + continue + total_cnt += cnt + return total_cnt + + def table_entry_operation_from_json( + self, json_resource, operation: WriteOperation): + """ + Parse a JSON-based table entry and insert/update/delete it + into/from the switch. + + :param json_resource: JSON-based table entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. + :return: inserted entry or None in case of parsing error + """ + + table_name = parse_resource_string_from_json( + json_resource, "table-name") + match_map = parse_match_operations_from_json(json_resource) + action_name = parse_resource_string_from_json( + json_resource, "action-name") + action_params = parse_action_parameters_from_json(json_resource) + priority = parse_resource_integer_from_json(json_resource, "priority") + metadata = parse_resource_bytes_from_json(json_resource, "metadata") + + if operation in [WriteOperation.insert, WriteOperation.update]: + LOGGER.debug("Table entry to insert/update: %s", json_resource) + return self.insert_table_entry( + table_name=table_name, + match_map=match_map, + action_name=action_name, + action_params=action_params, + priority=priority, + metadata=metadata if metadata else None + ) + if operation == WriteOperation.delete: + LOGGER.debug("Table entry to delete: %s", json_resource) + return self.delete_table_entry( + table_name=table_name, + match_map=match_map, + action_name=action_name, + action_params=action_params, + priority=priority + ) + return None + + def insert_table_entry(self, table_name, + match_map, action_name, action_params, + priority, metadata=None, cnt_pkt=-1, cnt_byte=-1): + """ + Insert an entry into a P4 table. 
+ This method has internal logic to discriminate among: + (i) Exact matches, + (ii) Ternary matches, + (iii) LPM matches, + (iv) Range matches, and + (v) Optional matches + + :param table_name: name of a P4 table + :param match_map: map of match operations + :param action_name: action name + :param action_params: map of action parameters + :param priority: entry priority + :param metadata: entry metadata + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + table = self.get_table(table_name) + assert table, \ + "P4 pipeline does not implement table " + table_name + + if not get_table_type(table): + msg = f"Table {table_name} is undefined, cannot insert entry" + LOGGER.error(msg) + raise UserError(msg) + + # Exact match is supported + if get_table_type(table) == p4info_pb2.MatchField.EXACT: + if priority != 0: + msg = f"Table {table_name} is non-ternary, priority must be 0" + LOGGER.error(msg) + raise UserError(msg) + return insert_table_entry_exact( + table_name, match_map, action_name, action_params, metadata, + cnt_pkt, cnt_byte) + + # Ternary and LPM matches are supported + if get_table_type(table) in \ + [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]: + if priority == 0: + msg = f"Table {table_name} is ternary, priority must be != 0" + LOGGER.error(msg) + raise UserError(msg) + return insert_table_entry_ternary( + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt, cnt_byte) + + # TODO: Cover RANGE match # pylint: disable=W0511 + if get_table_type(table) == p4info_pb2.MatchField.RANGE: + return insert_table_entry_range( + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt, cnt_byte) + + # TODO: Cover OPTIONAL match # pylint: disable=W0511 + if get_table_type(table) == p4info_pb2.MatchField.OPTIONAL: + return insert_table_entry_optional( + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt, cnt_byte) + + return None + + def delete_table_entry(self, table_name, + match_map, action_name, action_params, priority=0): + """ + Delete an entry from a P4 table. 
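+
+        A minimal sketch of a call; table, field and action names are
+        placeholders, and the exact-match value format follows the
+        p4runtime-shell string conventions assumed by this manager:
+
+            manager.delete_table_entry(
+                table_name="ingress.l2_fwd",
+                match_map={"hdr.ethernet.dst_addr": "aa:bb:cc:dd:ee:ff"},
+                action_name="ingress.set_egress_port",
+                action_params={"port": "1"},
+                priority=0)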
+ + :param table_name: name of a P4 table + :param match_map: map of match operations + :param action_name: action name + :param action_params: map of action parameters + :param priority: entry priority + :return: deleted entry + """ + table = self.get_table(table_name) + assert table, \ + "P4 pipeline does not implement table " + table_name + + if not get_table_type(table): + msg = f"Table {table_name} is undefined, cannot delete entry" + LOGGER.error(msg) + raise UserError(msg) + + table_entry = TableEntry(table_name)(action=action_name) + + for match_k, match_v in match_map.items(): + table_entry.match[match_k] = match_v + + for action_k, action_v in action_params.items(): + table_entry.action[action_k] = action_v + + if get_table_type(table) == p4info_pb2.MatchField.EXACT: + if priority != 0: + msg = f"Table {table_name} is non-ternary, priority must be 0" + LOGGER.error(msg) + raise UserError(msg) + + if get_table_type(table) in \ + [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]: + if priority == 0: + msg = f"Table {table_name} is ternary, priority must be != 0" + LOGGER.error(msg) + raise UserError(msg) + + # TODO: Ensure correctness of RANGE & OPTIONAL # pylint: disable=W0511 + if get_table_type(table) in \ + [p4info_pb2.MatchField.RANGE, p4info_pb2.MatchField.OPTIONAL]: + raise NotImplementedError( + "Range and optional-based table deletion not implemented yet") + + table_entry.priority = priority + + table_entry.delete() + LOGGER.info("Deleted entry %s from table: %s", table_entry, table_name) + + return table_entry + + def delete_table_entries(self, table_name): + """ + Delete all entries of a P4 table. + + :param table_name: name of a P4 table + :return: void + """ + table = self.get_table(table_name) + assert table, \ + "P4 pipeline does not implement table " + table_name + + if not get_table_type(table): + msg = f"Table {table_name} is undefined, cannot delete entry" + LOGGER.error(msg) + raise UserError(msg) + + TableEntry(table_name).read(function=lambda x: x.delete()) + LOGGER.info("Deleted all entries from table: %s", table_name) + + def print_table_entries_spec(self, table_name): + """ + Print the specification of a P4 table. 
+ Specification covers: + (i) match id, + (ii) match field name (e.g., ip_proto), + (iii) match type (e.g., exact, ternary, etc.), + (iv) match bitwidth + (v) action id, and + (vi) action name + + :param table_name: name of a P4 table + :return: void + """ + if (KEY_TABLE not in self.p4_objects) or \ + not self.p4_objects[KEY_TABLE]: + LOGGER.warning("No table specification to print\n") + return + + for table in self.p4_objects[KEY_TABLE]: + if not table.name == table_name: + continue + + entry = [] + + for i, match_field in enumerate(table.match_fields): + table_name = table.name if i == 0 else "" + match_field_id = match_field.id + match_field_name = match_field.name + match_type_str = match_type_to_str(match_field.match_type) + match_field_bitwidth = match_field.bitwidth + + entry.append( + [ + table_name, str(match_field_id), match_field_name, + match_type_str, str(match_field_bitwidth) + ] + ) + + print( + tabulate( + entry, + headers=[ + KEY_TABLE, "match id", "match field", + "match type", "match width" + ], + stralign="right", + tablefmt="pretty" + ) + ) + + entry.clear() + + for i, action in enumerate(table.action_refs): + table_name = table.name if i == 0 else "" + action_id = action.id + action_name = CONTEXT.get_name_from_id(action.id) + entry.append([table_name, str(action_id), action_name]) + + print( + tabulate( + entry, + headers=[KEY_TABLE, "action id", "action name"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + entry.clear() + + def print_table_entries_summary(self): + """ + Print a summary of a P4 table state. + Summary covers: + (i) table name, + (ii) number of entries in the table, and + (iii) a string of \n-separated entry IDs. + + :return: void + """ + if (KEY_TABLE not in self.p4_objects) or \ + not self.p4_objects[KEY_TABLE]: + LOGGER.warning("No tables to print\n") + return + + entry = [] + + for table in self.p4_objects[KEY_TABLE]: + table_name = table.name + entries = self.get_table_entries(table_name) + entries_nb = len(entries) + entry_ids_str = "\n".join(str(e.id) for e in entries) \ + if entries_nb > 0 else "-" + + entry.append([table_name, entries_nb, entry_ids_str]) + + print( + tabulate( + entry, + headers=[KEY_TABLE, "# of entries", "entry ids"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + def print_table_entries(self, table_name): + """ + Print all entries of a P4 table. 
+ + :param table_name: name of a P4 table + :return: void + """ + if (KEY_TABLE not in self.p4_objects) or \ + not self.p4_objects[KEY_TABLE]: + LOGGER.warning("No table entries to print\n") + return + + for table in self.p4_objects[KEY_TABLE]: + if not table.name == table_name: + continue + + entry = [] + + entries = self.get_table_entries(table.name) + for ent in entries: + entry_id = ent.id + mfs = ent.match.fields() + entry_match_field = "\n".join(mfs) + entry_match_value = "\n".join( + ent.match.value(match_field) for match_field in mfs + ) + entry_match_type = match_type_to_str( + ent.match.match_type(entry_match_field)) + entry_action_id = ent.action.id() + entry_action = ent.action.alias() + entry_priority = ent.priority + entry_is_default = ent.is_default + entry_idle_timeout_ns = ent.idle_timeout_ns + entry_metadata = ent.metadata + + entry.append( + [ + table_name, str(entry_id), + entry_match_field, entry_match_value, entry_match_type, + str(entry_action_id), entry_action, + str(entry_priority), str(entry_is_default), + str(entry_idle_timeout_ns), str(entry_metadata) + ] + ) + + if not entry: + entry.append([table_name] + ["-"] * 10) + + print( + tabulate( + entry, + headers=[ + KEY_TABLE, "table id", + "match field", "match value", "match type", + "action id", "action", "priority", "is default", + "idle timeout (ns)", "metadata" + ], + stralign="right", + tablefmt="pretty", + ) + ) + print("\n") + + ############################################################################ + + ############################################################################ + # Counter methods + ############################################################################ + def get_counter_names(self): + """ + Retrieve a list of P4 counter names. + + :return: list of P4 counter names + """ + if KEY_COUNTER not in self.p4_objects: + return [] + return list(cnt.name for cnt in self.p4_objects[KEY_COUNTER]) + + def get_counter_entries(self, cnt_name): + """ + Get a list of P4 counters by name. + + :param cnt_name: name of a P4 counter + :return: list of P4 counters or None + """ + if cnt_name not in self.counter_entries: + return None + self.counter_entries[cnt_name].clear() + self.counter_entries[cnt_name] = [] + + try: + for count, cnt_entry in enumerate(CounterEntry(cnt_name).read()): + LOGGER.debug( + "Counter %s - Entry %d\n%s", cnt_name, count, cnt_entry) + self.counter_entries[cnt_name].append(cnt_entry) + return self.counter_entries[cnt_name] + except P4RuntimeException as ex: + LOGGER.error(ex) + return [] + + def counter_entries_to_json(self, cnt_name): + """ + Encode all counter entries into a JSON object. + + :param cnt_name: counter name + :return: JSON object with counter entries + """ + if (KEY_COUNTER not in self.p4_objects) or \ + not self.p4_objects[KEY_COUNTER]: + LOGGER.warning("No counter entries to retrieve\n") + return {} + + cnt_res = {} + + for cnt in self.p4_objects[KEY_COUNTER]: + if not cnt.name == cnt_name: + continue + + entries = self.get_counter_entries(cnt.name) + if len(entries) == 0: + continue + + cnt_res["counter-name"] = cnt_name + + for ent in entries: + cnt_res["index"] = ent.index + cnt_res["packet-count"] = ent.packet_count + cnt_res["byte-count"] = ent.byte_count + + return cnt_res + + def count_counter_entries(self, cnt_name): + """ + Count the number of P4 counter entries by counter name. 
+ + :param cnt_name: name of a P4 counter + :return: number of P4 counters or negative integer + upon missing counter + """ + entries = self.get_counter_entries(cnt_name) + if entries is None: + return -1 + return len(entries) + + def count_counter_entries_all(self): + """ + Count all entries of a P4 counter. + + :return: number of P4 counter entries + """ + total_cnt = 0 + for cnt_name in self.get_counter_names(): + cnt = self.count_counter_entries(cnt_name) + if cnt < 0: + continue + total_cnt += cnt + return total_cnt + + def counter_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based counter entry and insert/update/delete it + into/from the switch. + + :param json_resource: JSON-based counter entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. + :return: inserted entry or None in case of parsing error + """ + cnt_name = parse_resource_string_from_json( + json_resource, "counter-name") + + if operation in [WriteOperation.insert, WriteOperation.update]: + index = parse_resource_integer_from_json( + json_resource, "index") + cnt_pkt = parse_resource_integer_from_json( + json_resource, "packet-count") + cnt_byte = parse_resource_integer_from_json( + json_resource, "byte-count") + + LOGGER.debug("Counter entry to insert/update: %s", json_resource) + return self.insert_counter_entry( + cnt_name=cnt_name, + index=index, + cnt_pkt=cnt_pkt, + cnt_byte=cnt_byte + ) + if operation == WriteOperation.delete: + LOGGER.debug("Counter entry to delete: %s", json_resource) + return self.clear_counter_entry( + cnt_name=cnt_name + ) + return None + + def insert_counter_entry(self, cnt_name, index=None, + cnt_pkt=-1, cnt_byte=-1): + """ + Insert a P4 counter entry. + + :param cnt_name: name of a P4 counter + :param index: counter index + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + cnt = self.get_counter(cnt_name) + assert cnt, \ + "P4 pipeline does not implement counter " + cnt_name + + cnt_entry = CounterEntry(cnt_name) + + if index: + cnt_entry.index = index + + if cnt_pkt > 0: + cnt_entry.packet_count = cnt_pkt + + if cnt_byte > 0: + cnt_entry.byte_count = cnt_byte + + cnt_entry.modify() + LOGGER.info("Updated counter entry: %s", cnt_entry) + + return cnt_entry + + def clear_counter_entry(self, cnt_name): + """ + Clear the counters of a counter entry by name. + + :param cnt_name: name of a P4 counter + :return: cleared entry + """ + cnt = self.get_counter(cnt_name) + assert cnt, \ + "P4 pipeline does not implement counter " + cnt_name + + cnt_entry = CounterEntry(cnt_name) + cnt_entry.clear_data() + LOGGER.info("Cleared data of counter entry: %s", cnt_entry) + + return cnt_entry + + def print_counter_entries_summary(self): + """ + Print a summary of a P4 counter state. + Summary covers: + (i) counter name, + (ii) number of entries in the table, and + (iii) a string of \n-separated entry IDs. 
+ + :return: void + """ + if (KEY_COUNTER not in self.p4_objects) or \ + not self.p4_objects[KEY_COUNTER]: + LOGGER.warning("No counters to print\n") + return + + entry = [] + + for cnt in self.p4_objects[KEY_COUNTER]: + entries = self.get_counter_entries(cnt.name) + entries_nb = len(entries) + entry_ids_str = ",".join(str(e.id) for e in entries) \ + if entries_nb > 0 else "-" + entry.append([cnt.name, str(entries_nb), entry_ids_str]) + + print( + tabulate( + entry, + headers=[KEY_COUNTER, "# of entries", "entry ids"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + ############################################################################ + + ############################################################################ + # Direct counter methods + ############################################################################ + def get_direct_counter_names(self): + """ + Retrieve a list of direct P4 counter names. + + :return: list of direct P4 counter names + """ + if KEY_DIR_COUNTER not in self.p4_objects: + return [] + return list(d_cnt.name for d_cnt in self.p4_objects[KEY_DIR_COUNTER]) + + def get_direct_counter_entries(self, d_cnt_name): + """ + Get a list of direct P4 counters by name. + + :param d_cnt_name: name of a direct P4 counter + :return: list of direct P4 counters or None + """ + if d_cnt_name not in self.direct_counter_entries: + return None + self.direct_counter_entries[d_cnt_name].clear() + self.direct_counter_entries[d_cnt_name] = [] + + try: + for count, d_cnt_entry in enumerate( + DirectCounterEntry(d_cnt_name).read()): + LOGGER.debug( + "Direct counter %s - Entry %d\n%s", + d_cnt_name, count, d_cnt_entry) + self.direct_counter_entries[d_cnt_name].append(d_cnt_entry) + return self.direct_counter_entries[d_cnt_name] + except P4RuntimeException as ex: + LOGGER.error("Failed to get direct counter %s entries: %s", + d_cnt_name, str(ex)) + return [] + + def direct_counter_entries_to_json(self, d_cnt_name): + """ + Encode all direct counter entries into a JSON object. + + :param d_cnt_name: direct counter name + :return: JSON object with direct counter entries + """ + if (KEY_DIR_COUNTER not in self.p4_objects) or \ + not self.p4_objects[KEY_DIR_COUNTER]: + LOGGER.warning("No direct counter entries to retrieve\n") + return {} + + d_cnt_res = {} + + for d_cnt in self.p4_objects[KEY_DIR_COUNTER]: + if not d_cnt.name == d_cnt_name: + continue + + entries = self.get_direct_counter_entries(d_cnt.name) + if len(entries) == 0: + continue + + d_cnt_res["direct-counter-name"] = d_cnt_name + + for ent in entries: + d_cnt_res["match-fields"] = [] + for k, v in ent.table_entry.match.items(): + d_cnt_res["match-fields"].append( + { + "match-field": k, + "match-value": v + } + ) + d_cnt_res["priority"] = ent.priority + d_cnt_res["packet-count"] = ent.packet_count + d_cnt_res["byte-count"] = ent.byte_count + + return d_cnt_res + + def count_direct_counter_entries(self, d_cnt_name): + """ + Count the number of direct P4 counter entries by counter name. + + :param d_cnt_name: name of a direct P4 counter + :return: number of direct P4 counters or negative integer + upon missing direct counter + """ + entries = self.get_direct_counter_entries(d_cnt_name) + if entries is None: + return -1 + return len(entries) + + def count_direct_counter_entries_all(self): + """ + Count all entries of a direct P4 counter. 
+ + :return: number of direct P4 counter entries + """ + total_cnt = 0 + for d_cnt_name in self.get_direct_counter_names(): + cnt = self.count_direct_counter_entries(d_cnt_name) + if cnt < 0: + continue + total_cnt += cnt + return total_cnt + + def direct_counter_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based direct counter entry and insert/update/delete it + into/from the switch. + + :param json_resource: JSON-based direct counter entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. + :return: inserted entry or None in case of parsing error + """ + d_cnt_name = parse_resource_string_from_json( + json_resource, "direct-counter-name") + + if operation in [WriteOperation.insert, WriteOperation.update]: + match_map = parse_match_operations_from_json(json_resource) + priority = parse_resource_integer_from_json( + json_resource, "priority") + cnt_pkt = parse_resource_integer_from_json( + json_resource, "packet-count") + cnt_byte = parse_resource_integer_from_json( + json_resource, "byte-count") + + LOGGER.debug( + "Direct counter entry to insert/update: %s", json_resource) + return self.insert_direct_counter_entry( + d_cnt_name=d_cnt_name, + match_map=match_map, + priority=priority, + cnt_pkt=cnt_pkt, + cnt_byte=cnt_byte + ) + if operation == WriteOperation.delete: + LOGGER.debug("Direct counter entry to delete: %s", json_resource) + return self.clear_direct_counter_entry( + d_cnt_name=d_cnt_name + ) + return None + + def insert_direct_counter_entry(self, d_cnt_name, match_map, + priority, cnt_pkt=-1, cnt_byte=-1): + """ + Insert a direct P4 counter entry. + + :param d_cnt_name: name of a direct P4 counter + :param match_map: map of match operations + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + d_cnt = self.get_direct_counter(d_cnt_name) + assert d_cnt, \ + "P4 pipeline does not implement direct counter " + d_cnt_name + + assert match_map,\ + "Direct counter entry without match operations is not accepted" + + d_cnt_entry = DirectCounterEntry(d_cnt_name) + + for match_k, match_v in match_map.items(): + d_cnt_entry.table_entry.match[match_k] = match_v + + d_cnt_entry.table_entry.priority = priority + + if cnt_pkt > 0: + d_cnt_entry.packet_count = cnt_pkt + + if cnt_byte > 0: + d_cnt_entry.byte_count = cnt_byte + + d_cnt_entry.modify() + LOGGER.info("Updated direct counter entry: %s", d_cnt_entry) + + return d_cnt_entry + + def clear_direct_counter_entry(self, d_cnt_name): + """ + Clear the counters of a direct counter entry by name. + + :param d_cnt_name: name of a direct P4 counter + :return: cleared entry + """ + d_cnt = self.get_direct_counter(d_cnt_name) + assert d_cnt, \ + "P4 pipeline does not implement direct counter " + d_cnt_name + + d_cnt_entry = DirectCounterEntry(d_cnt_name) + d_cnt_entry.clear_data() + LOGGER.info("Cleared direct counter entry: %s", d_cnt_entry) + + return d_cnt_entry + + def print_direct_counter_entries_summary(self): + """ + Print a summary of a direct P4 counter state. + Summary covers: + (i) direct counter name, + (ii) number of entries in the table, and + (iii) a string of \n-separated entry IDs. 
+ + :return: void + """ + if (KEY_DIR_COUNTER not in self.p4_objects) or \ + not self.p4_objects[KEY_DIR_COUNTER]: + LOGGER.warning("No direct counters to print\n") + return + + entry = [] + + for d_cnt in self.p4_objects[KEY_DIR_COUNTER]: + entries = self.get_direct_counter_entries(d_cnt.name) + entries_nb = len(entries) + entry_ids_str = ",".join(str(e.id) for e in entries) \ + if entries_nb > 0 else "-" + entry.append([d_cnt.name, str(entries_nb), entry_ids_str]) + + print( + tabulate( + entry, + headers=[KEY_DIR_COUNTER, "# of entries", "entry ids"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + ############################################################################ + + ############################################################################ + # Meter methods + ############################################################################ + def get_meter_names(self): + """ + Retrieve a list of P4 meter names. + + :return: list of P4 meter names + """ + if KEY_METER not in self.p4_objects: + return [] + return list(meter.name for meter in self.p4_objects[KEY_METER]) + + def get_meter_entries(self, meter_name): + """ + Get a list of P4 meters by name. + + :param meter_name: name of a P4 meter + :return: list of P4 meters or None + """ + if meter_name not in self.meter_entries: + return None + self.meter_entries[meter_name].clear() + self.meter_entries[meter_name] = [] + + try: + for count, meter_entry in enumerate(MeterEntry(meter_name).read()): + LOGGER.debug( + "Meter %s - Entry %d\n%s", meter_name, count, meter_entry) + self.meter_entries[meter_name].append(meter_entry) + return self.meter_entries[meter_name] + except P4RuntimeException as ex: + LOGGER.error(ex) + return [] + + def meter_entries_to_json(self, meter_name): + """ + Encode all meter entries into a JSON object. + + :param meter_name: meter name + :return: JSON object with meter entries + """ + if (KEY_METER not in self.p4_objects) or \ + not self.p4_objects[KEY_METER]: + LOGGER.warning("No meter entries to retrieve\n") + return {} + + meter_res = {} + + for meter in self.p4_objects[KEY_METER]: + if not meter.name == meter_name: + continue + + entries = self.get_meter_entries(meter.name) + if len(entries) == 0: + continue + + meter_res["meter-name"] = meter_name + + for ent in entries: + meter_res["index"] = ent.index + meter_res["cir"] = ent.cir + meter_res["cburst"] = ent.cburst + meter_res["pir"] = ent.pir + meter_res["pburst"] = ent.pburst + + return meter_res + + def count_meter_entries(self, meter_name): + """ + Count the number of P4 meter entries by meter name. + + :param meter_name: name of a P4 meter + :return: number of P4 meters or negative integer + upon missing meter + """ + entries = self.get_meter_entries(meter_name) + if entries is None: + return -1 + return len(entries) + + def count_meter_entries_all(self): + """ + Count all entries of a P4 meter. + + :return: number of direct P4 meter entries + """ + total_cnt = 0 + for meter_name in self.get_meter_names(): + cnt = self.count_meter_entries(meter_name) + if cnt < 0: + continue + total_cnt += cnt + return total_cnt + + def meter_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based meter entry and insert/update/delete it + into/from the switch. + + :param json_resource: JSON-based meter entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. 
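+            An illustrative insert/update payload (the meter name and the
+            numeric values are placeholders; the keys are the ones parsed
+            below):
+
+                {
+                    "meter-name": "ingress.port_meter",
+                    "index": 1,
+                    "committed-information-rate": 1000,
+                    "committed-burst-size": 500,
+                    "peak-information-rate": 2000,
+                    "peak-burst-size": 500
+                }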
+ :return: inserted entry or None in case of parsing error + """ + meter_name = parse_resource_string_from_json( + json_resource, "meter-name") + + if operation in [WriteOperation.insert, WriteOperation.update]: + index = parse_resource_integer_from_json( + json_resource, "index") + cir = parse_resource_integer_from_json( + json_resource, "committed-information-rate") + cburst = parse_resource_integer_from_json( + json_resource, "committed-burst-size") + pir = parse_resource_integer_from_json( + json_resource, "peak-information-rate") + pburst = parse_resource_integer_from_json( + json_resource, "peak-burst-size") + + LOGGER.debug("Meter entry to insert/update: %s", json_resource) + return self.insert_meter_entry( + meter_name=meter_name, + index=index, + cir=cir, + cburst=cburst, + pir=pir, + pburst=pburst + ) + if operation == WriteOperation.delete: + LOGGER.debug("Meter entry to delete: %s", json_resource) + return self.clear_meter_entry( + meter_name=meter_name + ) + return None + + def insert_meter_entry(self, meter_name, index=None, + cir=-1, cburst=-1, pir=-1, pburst=-1): + """ + Insert a P4 meter entry. + + :param meter_name: name of a P4 meter + :param index: P4 meter index + :param cir: meter's committed information rate + :param cburst: meter's committed burst size + :param pir: meter's peak information rate + :param pburst: meter's peak burst size + :return: inserted entry + """ + meter = self.get_meter(meter_name) + assert meter, \ + "P4 pipeline does not implement meter " + meter_name + + meter_entry = MeterEntry(meter_name) + + if index: + meter_entry.index = index + + if cir > 0: + meter_entry.cir = cir + + if cburst > 0: + meter_entry.cburst = cburst + + if pir > 0: + meter_entry.pir = pir + + if pburst > 0: + meter_entry.pburst = pburst + + meter_entry.modify() + LOGGER.info("Updated meter entry: %s", meter_entry) + + return meter_entry + + def clear_meter_entry(self, meter_name): + """ + Clear the rates and sizes of a meter entry by name. + + :param meter_name: name of a P4 meter + :return: cleared entry + """ + meter = self.get_meter(meter_name) + assert meter, \ + "P4 pipeline does not implement meter " + meter_name + + meter_entry = MeterEntry(meter_name) + meter_entry.clear_config() + LOGGER.info("Cleared meter entry: %s", meter_entry) + + return meter_entry + + def print_meter_entries_summary(self): + """ + Print a summary of a P4 meter state. + Summary covers: + (i) meter name, + (ii) number of entries in the table, and + (iii) a string of \n-separated entry IDs. + + :return: void + """ + if (KEY_METER not in self.p4_objects) or \ + not self.p4_objects[KEY_METER]: + LOGGER.warning("No meters to print\n") + return + + entry = [] + + for meter in self.p4_objects[KEY_METER]: + entries = self.get_meter_entries(meter.name) + entries_nb = len(entries) + entry_ids_str = ",".join(str(e.id) for e in entries) \ + if entries_nb > 0 else "-" + entry.append([meter.name, str(entries_nb), entry_ids_str]) + + print( + tabulate( + entry, + headers=[KEY_METER, "# of entries", "entry ids"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + ############################################################################ + + ############################################################################ + # Direct meter methods + ############################################################################ + def get_direct_meter_names(self): + """ + Retrieve a list of direct P4 meter names. 
+ + :return: list of direct P4 meter names + """ + if KEY_DIR_METER not in self.p4_objects: + return [] + return list(d_meter.name for d_meter in self.p4_objects[KEY_DIR_METER]) + + def get_direct_meter_entries(self, d_meter_name): + """ + Get a list of direct P4 meters by name. + + :param d_meter_name: name of a direct P4 meter + :return: list of direct P4 meters or None + """ + if d_meter_name not in self.direct_meter_entries: + return None + self.direct_meter_entries[d_meter_name].clear() + self.direct_meter_entries[d_meter_name] = [] + + try: + for count, d_meter_entry in enumerate( + MeterEntry(d_meter_name).read()): + LOGGER.debug( + "Direct meter %s - Entry %d\n%s", + d_meter_name, count, d_meter_entry) + self.direct_meter_entries[d_meter_name].append(d_meter_entry) + return self.direct_meter_entries[d_meter_name] + except P4RuntimeException as ex: + LOGGER.error(ex) + return [] + + def direct_meter_entries_to_json(self, d_meter_name): + """ + Encode all direct meter entries into a JSON object. + + :param d_meter_name: direct meter name + :return: JSON object with direct meter entries + """ + if (KEY_DIR_METER not in self.p4_objects) or \ + not self.p4_objects[KEY_DIR_METER]: + LOGGER.warning("No direct meter entries to retrieve\n") + return {} + + d_meter_res = {} + + for d_meter in self.p4_objects[KEY_DIR_METER]: + if not d_meter.name == d_meter_name: + continue + + entries = self.get_direct_meter_entries(d_meter.name) + if len(entries) == 0: + continue + + d_meter_res["direct-meter-name"] = d_meter_name + + for ent in entries: + d_meter_res["match-fields"] = [] + for k, v in ent.table_entry.match.items(): + d_meter_res["match-fields"].append( + { + "match-field": k, + "match-value": v + } + ) + d_meter_res["cir"] = ent.cir + d_meter_res["cburst"] = ent.cburst + d_meter_res["pir"] = ent.pir + d_meter_res["pburst"] = ent.pburst + + return d_meter_res + + def count_direct_meter_entries(self, d_meter_name): + """ + Count the number of direct P4 meter entries by meter name. + + :param d_meter_name: name of a direct P4 meter + :return: number of direct P4 meters or negative integer + upon missing direct meter + """ + entries = self.get_direct_meter_entries(d_meter_name) + if entries is None: + return -1 + return len(entries) + + def count_direct_meter_entries_all(self): + """ + Count all entries of a direct P4 meter. + + :return: number of direct P4 meter entries + """ + total_cnt = 0 + for d_meter_name in self.get_direct_meter_names(): + cnt = self.count_direct_meter_entries(d_meter_name) + if cnt < 0: + continue + total_cnt += cnt + return total_cnt + + def direct_meter_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based direct meter entry and insert/update/delete it + into/from the switch. + + :param json_resource: JSON-based direct meter entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. 
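+
+        Example of the expected JSON layout (illustrative values; the rate and
+        burst keys follow the parsing logic below, while the match operations
+        are parsed separately by parse_match_operations_from_json):
+            {
+                "direct-meter-name": "<direct meter name>",
+                "committed-information-rate": 1000,
+                "committed-burst-size": 200,
+                "peak-information-rate": 2000,
+                "peak-burst-size": 400
+            }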
+ :return: inserted entry or None in case of parsing error + """ + d_meter_name = parse_resource_string_from_json( + json_resource, "direct-meter-name") + + if operation in [WriteOperation.insert, WriteOperation.update]: + match_map = parse_match_operations_from_json(json_resource) + cir = parse_resource_integer_from_json( + json_resource, "committed-information-rate") + cburst = parse_resource_integer_from_json( + json_resource, "committed-burst-size") + pir = parse_resource_integer_from_json( + json_resource, "peak-information-rate") + pburst = parse_resource_integer_from_json( + json_resource, "peak-burst-size") + + LOGGER.debug( + "Direct meter entry to insert/update: %s", json_resource) + return self.insert_direct_meter_entry( + d_meter_name=d_meter_name, + match_map=match_map, + cir=cir, + cburst=cburst, + pir=pir, + pburst=pburst + ) + if operation == WriteOperation.delete: + LOGGER.debug("Direct meter entry to delete: %s", json_resource) + return self.clear_direct_meter_entry( + d_meter_name=d_meter_name + ) + return None + + def insert_direct_meter_entry(self, d_meter_name, match_map, + cir=-1, cburst=-1, pir=-1, pburst=-1): + """ + Insert a direct P4 meter entry. + + :param d_meter_name: name of a direct P4 meter + :param match_map: map of P4 table match operations + :param cir: meter's committed information rate + :param cburst: meter's committed burst size + :param pir: meter's peak information rate + :param pburst: meter's peak burst size + :return: inserted entry + """ + d_meter = self.get_direct_meter(d_meter_name) + assert d_meter, \ + "P4 pipeline does not implement direct meter " + d_meter_name + + assert match_map,\ + "Direct meter entry without match operations is not accepted" + + d_meter_entry = DirectMeterEntry(d_meter_name) + + for match_k, match_v in match_map.items(): + d_meter_entry.table_entry.match[match_k] = match_v + + if cir > 0: + d_meter_entry.cir = cir + + if cburst > 0: + d_meter_entry.cburst = cburst + + if pir > 0: + d_meter_entry.pir = pir + + if pburst > 0: + d_meter_entry.pburst = pburst + + d_meter_entry.modify() + LOGGER.info("Updated direct meter entry: %s", d_meter_entry) + + return d_meter_entry + + def clear_direct_meter_entry(self, d_meter_name): + """ + Clear the rates and sizes of a direct meter entry by name. + + :param d_meter_name: name of a direct P4 meter + :return: cleared entry + """ + d_meter = self.get_direct_meter(d_meter_name) + assert d_meter, \ + "P4 pipeline does not implement direct meter " + d_meter_name + + d_meter_entry = DirectMeterEntry(d_meter_name) + d_meter_entry.clear_config() + LOGGER.info("Cleared direct meter entry: %s", d_meter_entry) + + return d_meter_entry + + def print_direct_meter_entries_summary(self): + """ + Print a summary of a direct P4 meter state. + Summary covers: + (i) direct meter name, + (ii) number of entries in the table, and + (iii) a string of \n-separated entry IDs. 
+ + :return: void + """ + if (KEY_DIR_METER not in self.p4_objects) or \ + not self.p4_objects[KEY_DIR_METER]: + LOGGER.warning("No direct meters to print\n") + return + + entry = [] + + for d_meter in self.p4_objects[KEY_DIR_METER]: + entries = self.get_direct_meter_entries(d_meter.name) + entries_nb = len(entries) + entry_ids_str = ",".join(str(e.id) for e in entries) \ + if entries_nb > 0 else "-" + entry.append([d_meter.name, str(entries_nb), entry_ids_str]) + + print( + tabulate( + entry, + headers=[KEY_DIR_METER, "# of entries", "entry ids"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + ############################################################################ + + ############################################################################ + # Action profile member + ############################################################################ + def get_action_profile_names(self): + """ + Retrieve a list of action profile names. + + :return: list of action profile names + """ + if KEY_ACTION_PROFILE not in self.p4_objects: + return [] + return list(ap_name for ap_name in self.p4_objects[KEY_ACTION_PROFILE]) + + def get_action_prof_member_entries(self, ap_name): + """ + Get a list of action profile members by name. + + :param ap_name: name of a P4 action profile + :return: list of P4 action profile members + """ + if ap_name not in self.action_profile_members: + return None + self.action_profile_members[ap_name].clear() + self.action_profile_members[ap_name] = [] + + try: + for count, ap_entry in enumerate( + ActionProfileMember(ap_name).read()): + LOGGER.debug( + "Action profile member %s - Entry %d\n%s", + ap_name, count, ap_entry) + self.action_profile_members[ap_name].append(ap_entry) + return self.action_profile_members[ap_name] + except P4RuntimeException as ex: + LOGGER.error(ex) + return [] + + def action_prof_member_entries_to_json(self, ap_name): + """ + Encode all action profile members into a JSON object. + + :param ap_name: name of a P4 action profile + :return: JSON object with action profile member entries + """ + if (KEY_ACTION_PROFILE not in self.p4_objects) or \ + not self.p4_objects[KEY_ACTION_PROFILE]: + LOGGER.warning("No action profile member entries to retrieve\n") + return {} + + ap_res = {} + + for act_p in self.p4_objects[KEY_ACTION_PROFILE]: + if not act_p.name == ap_name: + continue + + ap_res["action-profile-name"] = ap_name + + entries = self.get_action_prof_member_entries(ap_name) + for ent in entries: + action = ent.action + action_name = CONTEXT.get_name_from_id(action.id) + ap_res["action"] = action_name + ap_res["action-params"] = [] + for k, v in action.items(): + ap_res["action-params"].append( + { + "param": k, + "value": v + } + ) + + ap_res["member-id"] = ent.member_id + + return ap_res + + def count_action_prof_member_entries(self, ap_name): + """ + Count the number of action profile members by name. + + :param ap_name: name of a P4 action profile + :return: number of action profile members or negative integer + upon missing member + """ + entries = self.get_action_prof_member_entries(ap_name) + if entries is None: + return -1 + return len(entries) + + def count_action_prof_member_entries_all(self): + """ + Count all action profile member entries. 
+ + :return: number of action profile member entries + """ + total_cnt = 0 + for ap_name in self.get_action_profile_names(): + cnt = self.count_action_prof_member_entries(ap_name) + if cnt < 0: + continue + total_cnt += cnt + return total_cnt + + def action_prof_member_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based action profile member entry and insert/update/delete + it into/from the switch. + + :param json_resource: JSON-based action profile member entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. + :return: inserted entry or None in case of parsing error + """ + ap_name = parse_resource_string_from_json( + json_resource, "action-profile-name") + member_id = parse_resource_integer_from_json(json_resource, "member-id") + action_name = parse_resource_string_from_json( + json_resource, "action-name") + + if operation in [WriteOperation.insert, WriteOperation.update]: + action_params = parse_action_parameters_from_json(json_resource) + + LOGGER.debug( + "Action profile member entry to insert/update: %s", + json_resource) + return self.insert_action_prof_member_entry( + ap_name=ap_name, + member_id=member_id, + action_name=action_name, + action_params=action_params + ) + if operation == WriteOperation.delete: + LOGGER.debug( + "Action profile member entry to delete: %s", json_resource) + return self.delete_action_prof_member_entry( + ap_name=ap_name, + member_id=member_id, + action_name=action_name + ) + return None + + def insert_action_prof_member_entry(self, ap_name, member_id, + action_name, action_params): + """ + Insert a P4 action profile member entry. + + :param ap_name: name of a P4 action profile + :param member_id: action profile member id + :param action_name: P4 action name + :param action_params: map of P4 action parameters + :return: inserted entry + """ + act_p = self.get_action_profile(ap_name) + assert act_p, \ + "P4 pipeline does not implement action profile " + ap_name + + ap_member_entry = ActionProfileMember(ap_name)( + member_id=member_id, action=action_name) + + for action_k, action_v in action_params.items(): + ap_member_entry.action[action_k] = action_v + + ex_msg = "" + try: + ap_member_entry.insert() + LOGGER.info( + "Inserted action profile member entry: %s", ap_member_entry) + except P4RuntimeWriteException as ex: + ex_msg = str(ex) + except P4RuntimeException as ex: + raise P4RuntimeException from ex + + # Entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + ap_member_entry.modify() + LOGGER.info( + "Updated action profile member entry: %s", ap_member_entry) + + return ap_member_entry + + def delete_action_prof_member_entry(self, ap_name, member_id, action_name): + """ + Delete a P4 action profile member entry. + + :param ap_name: name of a P4 action profile + :param member_id: action profile member id + :param action_name: P4 action name + :return: deleted entry + """ + act_p = self.get_action_profile(ap_name) + assert act_p, \ + "P4 pipeline does not implement action profile " + ap_name + + ap_member_entry = ActionProfileMember(ap_name)( + member_id=member_id, action=action_name) + ap_member_entry.delete() + LOGGER.info("Deleted action profile member entry: %s", ap_member_entry) + + return ap_member_entry + + def print_action_prof_members_summary(self): + """ + Print a summary of a P4 action profile member state. + Summary covers: + (i) action profile member id, + (ii) number of entries in the table, and + (iii) a string of \n-separated entry IDs. 
+ + :return: void + """ + if (KEY_ACTION_PROFILE not in self.p4_objects) or \ + not self.p4_objects[KEY_ACTION_PROFILE]: + LOGGER.warning("No action profile members to print\n") + return + + entry = [] + + for ap_name in self.p4_objects[KEY_ACTION_PROFILE]: + entries = self.get_action_prof_member_entries(ap_name) + entries_nb = len(entries) + entry_ids_str = ",".join(str(e.member_id) for e in entries) \ + if entries_nb > 0 else "-" + entry.append([ap_name, str(entries_nb), entry_ids_str]) + + print( + tabulate( + entry, + headers=["action profile member", "# of entries", "entry ids"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + def print_action_prof_member_entries(self, ap_name): + """ + Print all entries of a P4 action profile member. + + :param ap_name: name of a P4 action profile + :return: void + """ + if (KEY_ACTION_PROFILE not in self.p4_objects) or \ + not self.p4_objects[KEY_ACTION_PROFILE]: + LOGGER.warning("No action profile member entries to print\n") + return + + for act_p in self.p4_objects[KEY_ACTION_PROFILE]: + if not act_p.name == ap_name: + continue + + entry = [] + + entries = self.get_action_prof_member_entries(ap_name) + for ent in entries: + member_id = ent.member_id + action = ent.action + action_name = CONTEXT.get_name_from_id(action.id) + + entry.append([ap_name, str(member_id), action_name]) + + if not entry: + entry.append([ap_name] + ["-"] * 2) + + print( + tabulate( + entry, + headers=["action profile member", "member id", "action"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + ############################################################################ + # Action profile group + ############################################################################ + def get_action_prof_group_entries(self, ap_name): + """ + Get a list of action profile groups by name. + + :param ap_name: name of a P4 action profile + :return: list of P4 action profile groups + """ + if ap_name not in self.action_profile_groups: + return None + self.action_profile_groups[ap_name].clear() + self.action_profile_groups[ap_name] = [] + + try: + for count, ap_entry in enumerate( + ActionProfileGroup(ap_name).read()): + LOGGER.debug("Action profile group %s - Entry %d\n%s", + ap_name, count, ap_entry) + self.action_profile_groups[ap_name].append(ap_entry) + return self.action_profile_groups[ap_name] + except P4RuntimeException as ex: + LOGGER.error(ex) + return [] + + def count_action_prof_group_entries(self, ap_name): + """ + Count the number of action profile groups by name. + + :param ap_name: name of a P4 action profile + :return: number of action profile groups or negative integer + upon missing group + """ + entries = self.get_action_prof_group_entries(ap_name) + if entries is None: + return -1 + return len(entries) + + def count_action_prof_group_entries_all(self): + """ + Count all action profile group entries. + + :return: number of action profile group entries + """ + total_cnt = 0 + for ap_name in self.get_action_profile_names(): + cnt = self.count_action_prof_group_entries(ap_name) + if cnt < 0: + continue + total_cnt += cnt + return total_cnt + + def action_prof_group_entries_to_json(self, ap_name): + """ + Encode all action profile groups into a JSON object. 
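+
+        The returned object has the following general shape (illustrative
+        values; keys mirror the encoding logic below):
+            {
+                "action-profile-name": "<action profile name>",
+                "group-id": 1,
+                "members": [{"member": <member>}, ...]
+            }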
+ + :param ap_name: name of a P4 action profile + :return: JSON object with action profile group entries + """ + if (KEY_ACTION_PROFILE not in self.p4_objects) or \ + not self.p4_objects[KEY_ACTION_PROFILE]: + LOGGER.warning("No action profile group entries to retrieve\n") + return {} + + ap_res = {} + + for act_p in self.p4_objects[KEY_ACTION_PROFILE]: + if not act_p.name == ap_name: + continue + + ap_res["action-profile-name"] = ap_name + + entries = self.get_action_prof_group_entries(ap_name) + for ent in entries: + ap_res["group-id"] = ent.group_id + ap_res["members"] = [] + for mem in ent.members: + ap_res["members"].append( + { + "member": mem + } + ) + + return ap_res + + def action_prof_group_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based action profile group entry and insert/update/delete + it into/from the switch. + + :param json_resource: JSON-based action profile group entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. + :return: inserted entry or None in case of parsing error + """ + ap_name = parse_resource_string_from_json( + json_resource, "action-profile-name") + group_id = parse_resource_integer_from_json(json_resource, "group-id") + + if operation in [WriteOperation.insert, WriteOperation.update]: + members = parse_integer_list_from_json( + json_resource, "members", "member") + + LOGGER.debug( + "Action profile group entry to insert/update: %s", + json_resource) + return self.insert_action_prof_group_entry( + ap_name=ap_name, + group_id=group_id, + members=members + ) + if operation == WriteOperation.delete: + LOGGER.debug( + "Action profile group entry to delete: %s", json_resource) + return self.delete_action_prof_group_entry( + ap_name=ap_name, + group_id=group_id + ) + return None + + def insert_action_prof_group_entry(self, ap_name, group_id, members=None): + """ + Insert a P4 action profile group entry. + + :param ap_name: name of a P4 action profile + :param group_id: action profile group id + :param members: list of associated action profile members + :return: inserted entry + """ + ap = self.get_action_profile(ap_name) + assert ap, \ + "P4 pipeline does not implement action profile " + ap_name + + ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + + if members: + for m in members: + ap_group_entry.add(member_id=m) + + ex_msg = "" + try: + ap_group_entry.insert() + LOGGER.info( + "Inserted action profile group entry: %s", ap_group_entry) + except P4RuntimeWriteException as ex: + ex_msg = str(ex) + except P4RuntimeException as ex: + raise P4RuntimeException from ex + + # Entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + ap_group_entry.modify() + LOGGER.info( + "Updated action profile group entry: %s", ap_group_entry) + + return ap_group_entry + + def delete_action_prof_group_entry(self, ap_name, group_id): + """ + Delete a P4 action profile group entry. + + :param ap_name: name of a P4 action profile + :param group_id: action profile group id + :return: deleted entry + """ + ap = self.get_action_profile(ap_name) + assert ap, \ + "P4 pipeline does not implement action profile " + ap_name + + ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry.delete() + LOGGER.info("Deleted action profile group entry: %s", ap_group_entry) + + return ap_group_entry + + def clear_action_prof_group_entry(self, ap_name, group_id): + """ + Clean a P4 action profile group entry. 
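+
+        A minimal usage sketch (illustrative; 'mgr' denotes an instance of
+        this manager class):
+            mgr.clear_action_prof_group_entry("<action profile name>", group_id=1)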
+ + :param ap_name: name of a P4 action profile + :param group_id: action profile group id + :return: cleaned entry + """ + ap = self.get_action_profile(ap_name) + assert ap, \ + "P4 pipeline does not implement action profile " + ap_name + + ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry.clear() + LOGGER.info("Cleared action profile group entry: %s", ap_group_entry) + + return ap_group_entry + + def print_action_prof_groups_summary(self): + """ + Print a summary of a P4 action profile group state. + Summary covers: + (i) action profile group id, + (ii) number of entries in the table, and + (iii) a string of \n-separated entry IDs. + + :return: void + """ + if (KEY_ACTION_PROFILE not in self.p4_objects) or \ + not self.p4_objects[KEY_ACTION_PROFILE]: + LOGGER.warning("No action profile groups to print\n") + return + + entry = [] + + for ap_name in self.p4_objects[KEY_ACTION_PROFILE]: + entries = self.get_action_prof_group_entries(ap_name) + entries_nb = len(entries) + entry_ids_str = ",".join(str(e.group_id) for e in entries) \ + if entries_nb > 0 else "-" + entry.append([ap_name, str(entries_nb), entry_ids_str]) + + print( + tabulate( + entry, + headers=["action profile group", "# of entries", "entry ids"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + def print_action_prof_group_entries(self, ap_name): + """ + Print all entries of a P4 action profile group. + + :param ap_name: name of a P4 action profile + :return: void + """ + if (KEY_ACTION_PROFILE not in self.p4_objects) or \ + not self.p4_objects[KEY_ACTION_PROFILE]: + LOGGER.warning("No action profile group entries to print\n") + return + + for ap in self.p4_objects[KEY_ACTION_PROFILE]: + if not ap.name == ap_name: + continue + + entry = [] + + entries = self.get_action_prof_group_entries(ap_name) + for e in entries: + group_id = e.group_id + members_str = "\n".join(m for m in e.members) + entry.append([ap_name, str(group_id), members_str]) + + if not entry: + entry.append([ap_name] + ["-"] * 2) + + print( + tabulate( + entry, + headers=[ + "action profile group", "group id", "members" + ], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + ############################################################################ + # Packet replication method 1: Multicast group + ############################################################################ + def get_multicast_group_entry(self, group_id): + """ + Get a multicast group entry by group id. + + :param group_id: id of a multicast group + :return: multicast group entry or none + """ + if group_id not in self.multicast_groups: + return None + self.multicast_groups[group_id] = None + + try: + mcast_group = MulticastGroupEntry(group_id).read() + LOGGER.debug("Multicast group %d\n%s", group_id, mcast_group) + self.multicast_groups[group_id] = mcast_group + return self.multicast_groups[group_id] + except P4RuntimeException as ex: + LOGGER.error(ex) + return None + + def count_multicast_groups(self): + """ + Count the number of multicast groups. + + :return: number of multicast groups + """ + return len(self.multicast_groups.keys()) + + def multicast_group_entries_to_json(self): + """ + Encode all multicast groups into a JSON object. 
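+
+        The returned list has the following shape (illustrative values; keys
+        mirror the encoding logic below):
+            [
+                {
+                    "group-id": 1,
+                    "egress-ports": [{"egress-port": 1}, {"egress-port": 2}],
+                    "instances": [{"instance": 1}, {"instance": 1}]
+                }
+            ]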
+ + :return: JSON object with multicast group entries + """ + if not self.multicast_groups: + LOGGER.warning("No multicast group entries to retrieve\n") + return {} + + mcast_list_res = [] + + for mcast_group in self.multicast_groups.values(): + mcast_res = {} + mcast_res["group-id"] = mcast_group.group_id + + mcast_res["egress-ports"] = [] + mcast_res["instances"] = [] + for r in mcast_group.replicas: + mcast_res["egress-ports"].append( + { + "egress-port": r.egress_port + } + ) + mcast_res["instances"].append( + { + "instance": r.instance + } + ) + mcast_list_res.append(mcast_res) + + return mcast_list_res + + def multicast_group_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based multicast group entry and insert/update/delete it + into/from the switch. + + :param json_resource: JSON-based multicast group entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. + :return: inserted entry or None in case of parsing error + """ + group_id = parse_resource_integer_from_json(json_resource, "group-id") + + if operation in [WriteOperation.insert, WriteOperation.update]: + ports = parse_integer_list_from_json( + json_resource, "ports", "port") + + LOGGER.debug( + "Multicast group entry to insert/update: %s", json_resource) + return self.insert_multicast_group_entry( + group_id=group_id, + ports=ports + ) + if operation == WriteOperation.delete: + LOGGER.debug("Multicast group entry to delete: %s", json_resource) + return self.delete_multicast_group_entry( + group_id=group_id + ) + return None + + def insert_multicast_group_entry(self, group_id, ports): + """ + Insert a new multicast group. + + :param group_id: id of a multicast group + :param ports: list of egress ports to multicast + :return: inserted multicast group + """ + assert group_id > 0, \ + "Multicast group " + group_id + " must be > 0" + assert ports, \ + "No multicast group ports are provided" + + mcast_group = MulticastGroupEntry(group_id) + for p in ports: + mcast_group.add(p, 1) + + ex_msg = "" + try: + mcast_group.insert() + LOGGER.info("Inserted multicast group entry: %s", mcast_group) + except P4RuntimeWriteException as ex: + ex_msg = str(ex) + except P4RuntimeException as ex: + raise P4RuntimeException from ex + + # Entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + mcast_group.modify() + LOGGER.info("Updated multicast group entry: %s", mcast_group) + + self.multicast_groups[group_id] = mcast_group + + return mcast_group + + def delete_multicast_group_entry(self, group_id): + """ + Delete a multicast group by id. + + :param group_id: id of a multicast group + :return: deleted multicast group + """ + assert group_id > 0, \ + "Multicast group " + group_id + " must be > 0" + + mcast_group = MulticastGroupEntry(group_id) + mcast_group.delete() + + if group_id in self.multicast_groups: + del self.multicast_groups[group_id] + LOGGER.info( + "Deleted multicast group %d", group_id) + + return mcast_group + + def delete_multicast_group_entries(self): + """ + Delete all multicast groups. + + :return: void + """ + for mcast_group in MulticastGroupEntry().read(): + gid = mcast_group.group_id + mcast_group.delete() + del self.multicast_groups[gid] + + assert self.count_multicast_groups() == 0, \ + "Failed to purge all multicast groups" + LOGGER.info("Deleted all multicast groups") + + def print_multicast_groups_summary(self): + """ + Print a summary of a P4 multicast group state. 
+ Summary covers: + (i) multicast group id, + (ii) a string of \n-separated egress ports, and + (iii) a string of \n-separated replica instances. + + :return: void + """ + entry = [] + + for mcast_group in self.multicast_groups.values(): + ports_str = "\n".join( + str(r.egress_port) for r in mcast_group.replicas) + inst_str = "\n".join( + str(r.instance) for r in mcast_group.replicas) + entry.append([str(mcast_group.group_id), ports_str, inst_str]) + + if not entry: + entry.append(3 * ["-"]) + + print( + tabulate( + entry, + headers=["multicast group id", "egress ports", "instances"], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + ############################################################################ + # Packet replication method 2: Clone session + ############################################################################ + def get_clone_session_entry(self, session_id): + """ + Get a clone session entry by session id. + + :param session_id: id of a clone session + :return: clone session entry or none + """ + if session_id not in self.clone_session_entries: + return None + self.clone_session_entries[session_id] = None + + try: + session = CloneSessionEntry(session_id).read() + LOGGER.debug("Clone session %d\n%s", session_id, session) + self.clone_session_entries[session_id] = session + return self.clone_session_entries[session_id] + except P4RuntimeException as ex: + LOGGER.error(ex) + return None + + def count_clone_session_entries(self): + """ + Count the number of clone sessions. + + :return: number of clone sessions + """ + return len(self.clone_session_entries.keys()) + + def clone_session_entries_to_json(self): + """ + Encode all clone sessions into a JSON object. + + :return: JSON object with clone session entries + """ + if not self.clone_session_entries: + LOGGER.warning("No clone session entries to retrieve\n") + return {} + + session_list_res = [] + + for session in self.clone_session_entries.values(): + session_res = {} + session_res["session-id"] = session.session_id + + session_res["egress-ports"] = [] + session_res["instances"] = [] + for r in session.replicas: + session_res["egress-ports"].append( + { + "egress-port": r.egress_port + } + ) + session_res["instances"].append( + { + "instance": r.instance + } + ) + session_list_res.append(session_res) + + return session_list_res + + def clone_session_entry_operation_from_json(self, + json_resource, + operation: WriteOperation): + """ + Parse a JSON-based clone session entry and insert/update/delete it + into/from the switch. + + :param json_resource: JSON-based clone session entry + :param operation: Write operation (i.e., insert, modify, delete) + to perform. + :return: inserted entry or None in case of parsing error + """ + session_id = parse_resource_integer_from_json( + json_resource, "session-id") + + if operation in [WriteOperation.insert, WriteOperation.update]: + ports = parse_integer_list_from_json( + json_resource, "ports", "port") + + LOGGER.debug( + "Clone session entry to insert/update: %s", json_resource) + return self.insert_clone_session_entry( + session_id=session_id, + ports=ports + ) + if operation == WriteOperation.delete: + LOGGER.debug( + "Clone session entry to delete: %s", json_resource) + return self.delete_clone_session_entry( + session_id=session_id + ) + return None + + def insert_clone_session_entry(self, session_id, ports): + """ + Insert a new clone session. 
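+
+        A minimal usage sketch (illustrative; 'mgr' denotes an instance of
+        this manager class with an established P4Runtime connection):
+            mgr.insert_clone_session_entry(session_id=5, ports=[1, 2])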
+
+        :param session_id: id of a clone session
+        :param ports: list of egress ports to clone session
+        :return: inserted clone session
+        """
+        assert session_id > 0, \
+            "Clone session " + str(session_id) + " must be > 0"
+        assert ports, \
+            "No clone session ports are provided"
+
+        session = CloneSessionEntry(session_id)
+        for p in ports:
+            session.add(p, 1)
+
+        ex_msg = ""
+        try:
+            session.insert()
+            LOGGER.info("Inserted clone session entry: %s", session)
+        except P4RuntimeWriteException as ex:
+            ex_msg = str(ex)
+        except P4RuntimeException as ex:
+            raise P4RuntimeException from ex
+
+        # Entry exists, needs to be modified
+        if "ALREADY_EXISTS" in ex_msg:
+            session.modify()
+            LOGGER.info("Updated clone session entry: %s", session)
+
+        self.clone_session_entries[session_id] = session
+
+        return session
+
+    def delete_clone_session_entry(self, session_id):
+        """
+        Delete a clone session by id.
+
+        :param session_id: id of a clone session
+        :return: deleted clone session
+        """
+        assert session_id > 0, \
+            "Clone session " + str(session_id) + " must be > 0"
+
+        session = CloneSessionEntry(session_id)
+        session.delete()
+
+        if session_id in self.clone_session_entries:
+            del self.clone_session_entries[session_id]
+            LOGGER.info(
+                "Deleted clone session %d", session_id)
+
+        return session
+
+    def delete_clone_session_entries(self):
+        """
+        Delete all clone sessions.
+
+        :return: void
+        """
+        for e in CloneSessionEntry().read():
+            sid = e.session_id
+            e.delete()
+            del self.clone_session_entries[sid]
+
+        assert self.count_clone_session_entries() == 0, \
+            "Failed to purge all clone sessions"
+        LOGGER.info("Deleted all clone sessions")
+
+    def print_clone_sessions_summary(self):
+        """
+        Print a summary of a P4 clone session state.
+        Summary covers:
+        (i) clone session id,
+        (ii) a string of \n-separated egress ports, and
+        (iii) a string of \n-separated replica instances.
+
+        :return: void
+        """
+        entry = []
+
+        for session in self.clone_session_entries.values():
+            ports_str = "\n".join(
+                str(r.egress_port) for r in session.replicas)
+            inst_str = "\n".join(
+                str(r.instance) for r in session.replicas)
+            entry.append([str(session.session_id), ports_str, inst_str])
+
+        if not entry:
+            entry.append(3 * ["-"])
+
+        print(
+            tabulate(
+                entry,
+                headers=["clone session id", "egress ports", "instances"],
+                stralign="right",
+                tablefmt="pretty"
+            )
+        )
+        print("\n")
+
+    ############################################################################
+    # Packet replication method 3: Packet in
+    ############################################################################
+    def get_packet_metadata(self, meta_type, attr_name=None, attr_id=None):
+        """
+        Retrieve the pipeline's metadata by metadata type field.
+
+        :param meta_type: metadata type field
+        :param attr_name: metadata name field (optional)
+        :param attr_id: metadata id field (optional)
+        :return: packet metadata
+        """
+        for table in self.__p4info.controller_packet_metadata:
+            pre = table.preamble
+            if pre.name == meta_type:
+                for meta in table.metadata:
+                    if attr_name is not None:
+                        if meta.name == attr_name:
+                            return meta
+                    elif attr_id is not None:
+                        if meta.id == attr_id:
+                            return meta
+        raise AttributeError(
+            f"ControllerPacketMetadata {meta_type} has no metadata "
+            f"{attr_name if attr_name is not None else attr_id} (check P4Info)")
+
+    # TODO: test packet in  # pylint: disable=W0511
+    def create_packet_in(self, payload, metadata=None):
+        """
+        Create a packet-in object.
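+
+        A minimal sketch (illustrative; 'mgr' denotes an instance of this
+        manager class, and the metadata keys must match the 'packet_in'
+        controller_packet_metadata declared by the P4 program):
+            pkt = mgr.create_packet_in(b"<payload bytes>", metadata={"<name>": "<value>"})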
+ + :param payload: packet-in payload + :param metadata: packet-in metadata (optional) + :return: packet-in object + """ + if not self.p4_objects[KEY_CTL_PKT_METADATA]: + LOGGER.warning("Cannot create packet in. " + "No controller packet metadata in the pipeline\n") + return None + + packet_in = PacketOut() + packet_in.payload = payload + if metadata: + for name, value in metadata.items(): + p4info_meta = self.get_packet_metadata("packet_in", name) + meta = packet_in.metadata.add() + meta.metadata_id = p4info_meta.id + meta.value = encode(value, p4info_meta.bitwidth) + return packet_in + + def send_packet_in(self, payload, metadata=None, timeout=1): + """ + Send a packet-in message. + Note that the sniff method is blocking, thus it should be invoked by + another thread. + + :param payload: packet-in payload + :param metadata: packet-in metadata (optional) + :param timeout: packet-in timeout (defaults to 1s) + :return: void + """ + packet_in = self.create_packet_in(payload, metadata) + + # TODO: experimental piece of code # pylint: disable=W0511 + captured_packet = [] + + def _sniff_packet(captured_pkt): + """ + Invoke packet-in sniff method. + + :param captured_pkt: buffer for the packet to be captured + :return: void + """ + captured_pkt += packet_in.sniff(timeout=timeout) + + _t = Thread(target=_sniff_packet, args=(captured_packet,)) + _t.start() + # P4Runtime client sends the packet to the switch + CLIENT.stream_in_q["packet"].put(packet_in) + _t.join() + LOGGER.info("Packet-in sent: %s", packet_in) + + ############################################################################ + # Packet replication method 4: Packet out + ############################################################################ + # TODO: test packet out # pylint: disable=W0511 + def create_packet_out(self, payload, metadata=None): + """ + Create a packet-out object. + + :param payload: packet-out payload + :param metadata: packet-out metadata (optional) + :return: packet-out object + """ + if not self.p4_objects[KEY_CTL_PKT_METADATA]: + LOGGER.warning("Cannot create packet out. " + "No controller packet metadata in the pipeline\n") + return None + + packet_out = PacketOut() + packet_out.payload = payload + if metadata: + for name, value in metadata.items(): + p4info_meta = self.get_packet_metadata("packet_out", name) + meta = packet_out.metadata.add() + meta.metadata_id = p4info_meta.id + meta.value = encode(value, p4info_meta.bitwidth) + return packet_out + + def send_packet_out(self, payload, metadata=None): + """ + Send a packet-out message. + + :param payload: packet-out payload + :param metadata: packet-out metadata (optional) + :return: void + """ + packet_out = self.create_packet_out(payload, metadata) + packet_out.send() + LOGGER.info("Packet-out sent: %s", packet_out) + + ############################################################################ + # Packet replication method 5: Idle timeout notification + ############################################################################ + # TODO: Support IdleTimeoutNotification # pylint: disable=W0511 + ############################################################################ + + def print_objects(self): + """ + Print all P4 objects of the installed pipeline. 
+ + :return: void + """ + if not self.p4_objects: + self.__discover_objects() + + for obj_name, objects in self.p4_objects.items(): + entry = [] + + for obj in objects: + entry.append([obj.name]) + + if not entry: + entry.append("-") + print( + tabulate( + entry, + headers=[obj_name], + stralign="right", + tablefmt="pretty" + ) + ) + print("\n") + + +class P4Object: + """ + P4 object. + """ + + def __init__(self, obj_type, obj): + self.name = obj.preamble.name + self.id = obj.preamble.id + self._obj_type = obj_type + self._obj = obj + self.__doc__ = f""" +A wrapper around the P4Info Protobuf message for +{obj_type.pretty_name} '{self.name}'. +You can access any field from the message with <self>.<field name>. +You can access the name directly with <self>.name. +You can access the id directly with <self>.id. +If you need the underlying Protobuf message, you can access it with msg(). +""" + + def __getattr__(self, name): + return getattr(self._obj, name) + + def __settattr__(self, name, value): + return UserError( + f"Operation {name}:{value} not supported") + + def msg(self): + """Get Protobuf message object""" + return self._obj + + def actions(self): + """Print list of actions, only for tables and action profiles.""" + if self._obj_type == P4Type.table: + for action in self._obj.action_refs: + print(CONTEXT.get_name_from_id(action.id)) + elif self._obj_type == P4Type.action_profile: + t_id = self._obj.table_ids[0] + t_name = CONTEXT.get_name_from_id(t_id) + t = CONTEXT.get_table(t_name) + for action in t.action_refs: + print(CONTEXT.get_name_from_id(action.id)) + else: + raise UserError( + "'actions' is only available for tables and action profiles") + + +class P4Objects: + """ + P4 objects. + """ + + def __init__(self, obj_type): + self._obj_type = obj_type + self._names = sorted([name for name, _ in CONTEXT.get_objs(obj_type)]) + self._iter = None + self.__doc__ = """ +All the {pnames} in the P4 program. +To access a specific {pname}, use {p4info}['<name>']. +You can use this class to iterate over all {pname} instances: +\tfor x in {p4info}: +\t\tprint(x.id) +""".format(pname=obj_type.pretty_name, pnames=obj_type.pretty_names, + p4info=obj_type.p4info_name) + + def __getitem__(self, name): + obj = CONTEXT.get_obj(self._obj_type, name) + if obj is None: + raise UserError( + f"{self._obj_type.pretty_name} '{name}' does not exist") + return P4Object(self._obj_type, obj) + + def __setitem__(self, name, value): + raise UserError("Operation not allowed") + + def __iter__(self): + self._iter = iter(self._names) + return self + + def __next__(self): + name = next(self._iter) + return self[name] + + +class MatchKey: + """ + P4 match key. + """ + + def __init__(self, table_name, match_fields): + self._table_name = table_name + self._fields = OrderedDict() + self._fields_suffixes = {} + for mf in match_fields: + self._add_field(mf) + self._mk = OrderedDict() + self._set_docstring() + + def _set_docstring(self): + self.__doc__ = f"Match key fields for table '{self._table_name}':\n\n" + for _, info in self._fields.items(): + self.__doc__ += str(info) + self.__doc__ += """ +Set a field value with <self>['<field_name>'] = '...' + * For exact match: <self>['<f>'] = '<value>' + * For ternary match: <self>['<f>'] = '<value>&&&<mask>' + * For LPM match: <self>['<f>'] = '<value>/<mask>' + * For range match: <self>['<f>'] = '<value>..<mask>' + * For optional match: <self>['<f>'] = '<value>' + +If it's inconvenient to use the whole field name, you can use a unique suffix. 
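+For example (illustrative field name and value, assuming a 32-bit IPv4 field):
+ * <self>['hdr.ipv4.dst_addr'] = '10.0.0.0/8'
+ * <self>['dst_addr'] = '10.0.0.0/8' (same field, addressed by its unique suffix)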
+ +You may also use <self>.set(<f>='<value>') +\t(<f> must not include a '.' in this case, +but remember that you can use a unique suffix) +""" + + def _get_mf(self, name): + if name in self._fields: + return self._fields[name] + if name in self._fields_suffixes: + return self._fields[self._fields_suffixes[name]] + raise UserError( + f"'{name}' is not a valid match field name, nor a valid unique " + f"suffix, for table '{self._table_name}'") + + def __setitem__(self, name, value): + field_info = self._get_mf(name) + self._mk[name] = self._parse_mf(value, field_info) + print(self._mk[name]) + + def __getitem__(self, name): + _ = self._get_mf(name) + print(self._mk.get(name, "Unset")) + + def _parse_mf(self, s, field_info): + if not isinstance(s, str): + raise UserError("Match field value must be a string") + if field_info.match_type == p4info_pb2.MatchField.EXACT: + return self._parse_mf_exact(s, field_info) + if field_info.match_type == p4info_pb2.MatchField.LPM: + return self._parse_mf_lpm(s, field_info) + if field_info.match_type == p4info_pb2.MatchField.TERNARY: + return self._parse_mf_ternary(s, field_info) + if field_info.match_type == p4info_pb2.MatchField.RANGE: + return self._parse_mf_range(s, field_info) + if field_info.match_type == p4info_pb2.MatchField.OPTIONAL: + return self._parse_mf_optional(s, field_info) + raise UserError( + f"Unsupported match type for field:\n{field_info}") + + def _parse_mf_exact(self, s, field_info): + v = encode(s.strip(), field_info.bitwidth) + return self._sanitize_and_convert_mf_exact(v, field_info) + + def _sanitize_and_convert_mf_exact(self, value, field_info): + mf = p4runtime_pb2.FieldMatch() + mf.field_id = field_info.id + mf.exact.value = make_canonical_if_option_set(value) + return mf + + def _parse_mf_optional(self, s, field_info): + v = encode(s.strip(), field_info.bitwidth) + return self._sanitize_and_convert_mf_optional(v, field_info) + + def _sanitize_and_convert_mf_optional(self, value, field_info): + mf = p4runtime_pb2.FieldMatch() + mf.field_id = field_info.id + mf.optional.value = make_canonical_if_option_set(value) + return mf + + def _parse_mf_lpm(self, s, field_info): + try: + prefix, length = s.split('/') + prefix, length = prefix.strip(), length.strip() + except ValueError: + prefix = s + length = str(field_info.bitwidth) + + prefix = encode(prefix, field_info.bitwidth) + try: + length = int(length) + except ValueError as ex: + raise UserError(f"'{length}' is not a valid prefix length") from ex + + return self._sanitize_and_convert_mf_lpm(prefix, length, field_info) + + def _sanitize_and_convert_mf_lpm(self, prefix, length, field_info): + if length == 0: + raise UserError( + "Ignoring LPM don't care match (prefix length of 0) " + "as per P4Runtime spec") + + mf = p4runtime_pb2.FieldMatch() + mf.field_id = field_info.id + mf.lpm.prefix_len = length + + first_byte_masked = length // 8 + if first_byte_masked == len(prefix): + mf.lpm.value = prefix + return mf + + barray = bytearray(prefix) + transformed = False + r = length % 8 + byte_mask = 0xff & ((0xff << (8 - r))) + if barray[first_byte_masked] & byte_mask != barray[first_byte_masked]: + transformed = True + barray[first_byte_masked] = barray[first_byte_masked] & byte_mask + + for i in range(first_byte_masked + 1, len(prefix)): + if barray[i] != 0: + transformed = True + barray[i] = 0 + if transformed: + print("LPM value was transformed to conform to the P4Runtime spec " + "(trailing bits must be unset)") + mf.lpm.value = bytes(make_canonical_if_option_set(barray)) + return mf 
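+
+    # Worked example (illustrative): for a 32-bit field, '192.168.1.7/24' is
+    # parsed into prefix_len=24 and a value whose trailing bits are zeroed
+    # (i.e., 192.168.1.0), since the P4Runtime spec requires all bits beyond
+    # the prefix length to be unset.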
+ + def _parse_mf_ternary(self, s, field_info): + try: + value, mask = s.split('&&&') + value, mask = value.strip(), mask.strip() + except ValueError: + value = s.strip() + mask = "0b" + ("1" * field_info.bitwidth) + + value = encode(value, field_info.bitwidth) + mask = encode(mask, field_info.bitwidth) + + return self._sanitize_and_convert_mf_ternary(value, mask, field_info) + + def _sanitize_and_convert_mf_ternary(self, value, mask, field_info): + if int.from_bytes(mask, byteorder='big') == 0: + raise UserError( + "Ignoring ternary don't care match (mask of 0s) " + "as per P4Runtime spec") + + mf = p4runtime_pb2.FieldMatch() + mf.field_id = field_info.id + + barray = bytearray(value) + transformed = False + for i in range(len(value)): + if barray[i] & mask[i] != barray[i]: + transformed = True + barray[i] = barray[i] & mask[i] + if transformed: + print("Ternary value was transformed to conform to " + "the P4Runtime spec (masked off bits must be unset)") + mf.ternary.value = bytes( + make_canonical_if_option_set(barray)) + mf.ternary.mask = make_canonical_if_option_set(mask) + return mf + + def _parse_mf_range(self, s, field_info): + try: + start, end = s.split('..') + start, end = start.strip(), end.strip() + except ValueError as ex: + raise UserError(f"'{s}' does not specify a valid range, " + f"use '<start>..<end>'") from ex + + start = encode(start, field_info.bitwidth) + end = encode(end, field_info.bitwidth) + + return self._sanitize_and_convert_mf_range(start, end, field_info) + + def _sanitize_and_convert_mf_range(self, start, end, field_info): + start_ = int.from_bytes(start, byteorder='big') + end_ = int.from_bytes(end, byteorder='big') + if start_ > end_: + raise UserError("Invalid range match: start is greater than end") + if start_ == 0 and end_ == ((1 << field_info.bitwidth) - 1): + raise UserError( + "Ignoring range don't care match (all possible values) " + "as per P4Runtime spec") + mf = p4runtime_pb2.FieldMatch() + mf.field_id = field_info.id + mf.range.low = make_canonical_if_option_set(start) + mf.range.high = make_canonical_if_option_set(end) + return mf + + def _add_field(self, field_info): + self._fields[field_info.name] = field_info + self._recompute_suffixes() + + def _recompute_suffixes(self): + suffixes = {} + suffix_count = Counter() + for fname in self._fields: + suffix = None + for s in reversed(fname.split(".")): + suffix = s if suffix is None else s + "." + suffix + suffixes[suffix] = fname + suffix_count[suffix] += 1 + for suffix, c in suffix_count.items(): + if c > 1: + del suffixes[suffix] + self._fields_suffixes = suffixes + + def __str__(self): + return '\n'.join([str(mf) for name, mf in self._mk.items()]) + + def fields(self): + """ + Return a list of match fields. + + :return: list of match fields or None + """ + fields = [] + for name, _ in self._mk.items(): + fields.append(name) + return fields + + def value(self, field_name): + """ + Get the value of a match field. 
+ + :param field_name: match field name + :return: match field value + """ + for name, info in self._fields.items(): + if name != field_name: + continue + if info.match_type == p4info_pb2.MatchField.EXACT: + return self._mk[name].exact.value.hex() + if info.match_type == p4info_pb2.MatchField.LPM: + return self._mk[name].lpm.value.hex() + if info.match_type == p4info_pb2.MatchField.TERNARY: + return self._mk[name].ternary.value.hex() + if info.match_type == p4info_pb2.MatchField.RANGE: + return self._mk[name].range.value.hex() + if info.match_type == p4info_pb2.MatchField.OPTIONAL: + return self._mk[name].optional.value.hex() + return None + + def match_type(self, field_name): + """ + Get the type of a match field. + + :param field_name: match field name + :return: match field type + """ + for name, info in self._fields.items(): + if name not in field_name: + continue + return info.match_type + return None + + def set(self, **kwargs): + """ + Set match field parameter. + + :param kwargs: parameters + :return: void + """ + for name, value in kwargs.items(): + self[name] = value + + def clear(self): + """ + Clear all match fields. + + :return: void + """ + self._mk.clear() + + def _count(self): + return len(self._mk) + + +class Action: + """ + P4 action. + """ + + def __init__(self, action_name=None): + self._init = False + if action_name is None: + raise UserError("Please provide name for action") + self.action_name = action_name + action_info = CONTEXT.get_action(action_name) + if action_info is None: + raise UserError(f"Unknown action '{action_name}'") + self._action_id = action_info.preamble.id + self._params = OrderedDict() + for param in action_info.params: + self._params[param.name] = param + self._action_info = action_info + self._param_values = OrderedDict() + self._set_docstring() + self._init = True + + def _set_docstring(self): + self.__doc__ = f"Action parameters for action '{self.action_name}':\n\n" + for _, info in self._params.items(): + self.__doc__ += str(info) + self.__doc__ += "\n\n" + self.__doc__ += "Set a param value with " \ + "<self>['<param_name>'] = '<value>'\n" + self.__doc__ += "You may also use <self>.set(<param_name>='<value>')\n" + + def _get_param(self, name): + if name not in self._params: + raise UserError("'{name}' is not a valid action parameter name " + "for action '{self._action_name}'") + return self._params[name] + + def __setattr__(self, name, value): + if name[0] == "_" or not self._init: + super().__setattr__(name, value) + return + if name == "action_name": + raise UserError("Cannot change action name") + super().__setattr__(name, value) + + def __setitem__(self, name, value): + param_info = self._get_param(name) + self._param_values[name] = self._parse_param(value, param_info) + print(self._param_values[name]) + + def __getitem__(self, name): + _ = self._get_param(name) + print(self._param_values.get(name, "Unset")) + + def _parse_param(self, s, param_info): + if not isinstance(s, str): + raise UserError("Action parameter value must be a string") + v = encode(s, param_info.bitwidth) + p = p4runtime_pb2.Action.Param() + p.param_id = param_info.id + p.value = make_canonical_if_option_set(v) + return p + + def msg(self): + """ + Create an action message. 
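+
+        A minimal sketch (illustrative action and parameter names):
+            a = Action('<action name>')
+            a['<param name>'] = '<value>'
+            proto_msg = a.msg()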
+ + :return: action message + """ + msg = p4runtime_pb2.Action() + msg.action_id = self._action_id + msg.params.extend(self._param_values.values()) + return msg + + def _from_msg(self, msg): + assert self._action_id == msg.action_id + self._params.clear() + for p in msg.params: + p_name = CONTEXT.get_param_name(self.action_name, p.param_id) + self._param_values[p_name] = p + + def __str__(self): + return str(self.msg()) + + def id(self): + """ + Get action ID. + + :return: action ID + """ + return self._action_info.preamble.id + + def alias(self): + """ + Get action alias. + + :return: action alias + """ + return str(self._action_info.preamble.alias) + + def set(self, **kwargs): + """ + Set action parameters. + + :param kwargs: parameters + :return: void + """ + for name, value in kwargs.items(): + self[name] = value + + +class _EntityBase: + """ + Basic entity. + """ + + def __init__(self, entity_type, p4runtime_cls, modify_only=False): + self._init = False + self._entity_type = entity_type + self._entry = p4runtime_cls() + self._modify_only = modify_only + + def __dir__(self): + d = ["msg", "read"] + if self._modify_only: + d.append("modify") + else: + d.extend(["insert", "modify", "delete"]) + return d + + # to be called before issuing a P4Runtime request + # enforces checks that cannot be performed when setting individual fields + def _validate_msg(self): + return True + + def _update_msg(self): + pass + + def __getattr__(self, name): + raise AttributeError(f"'{self.__class__.__name__}' object " + f"has no attribute '{name}'") + + def msg(self): + """ + Get a basic entity message. + + :return: entity message + """ + self._update_msg() + return self._entry + + def _write(self, type_): + self._update_msg() + self._validate_msg() + update = p4runtime_pb2.Update() + update.type = type_ + getattr(update.entity, self._entity_type.name).CopyFrom(self._entry) + CLIENT.write_update(update) + + def insert(self): + """ + Insert an entity. + + :return: void + """ + if self._modify_only: + raise NotImplementedError( + f"Insert not supported for {self._entity_type.name}") + logging.debug("Inserting entry") + self._write(p4runtime_pb2.Update.INSERT) + + def delete(self): + """ + Delete an entity. + + :return: void + """ + if self._modify_only: + raise NotImplementedError( + f"Delete not supported for {self._entity_type.name}") + logging.debug("Deleting entry") + self._write(p4runtime_pb2.Update.DELETE) + + def modify(self): + """ + Modify an entity. + + :return: void + """ + logging.debug("Modifying entry") + self._write(p4runtime_pb2.Update.MODIFY) + + def _from_msg(self, msg): + raise NotImplementedError + + def read(self, function=None): + """ + Read an entity. + + :param function: function to read (optional) + :return: retrieved entity + """ + # Entities should override this method and provide a helpful docstring + self._update_msg() + self._validate_msg() + entity = p4runtime_pb2.Entity() + getattr(entity, self._entity_type.name).CopyFrom(self._entry) + + iterator = CLIENT.read_one(entity) + + # Cannot use a (simpler) generator here as we need to + # decorate __next__ with @parse_p4runtime_error. 
+ class _EntryIterator: + def __init__(self, entity, it): + self._entity = entity + self._it = it + self._entities_it = None + + def __iter__(self): + return self + + @parse_p4runtime_error + def __next__(self): + if self._entities_it is None: + rep = next(self._it) + self._entities_it = iter(rep.entities) + try: + entity = next(self._entities_it) + except StopIteration: + self._entities_it = None + return next(self) + + if isinstance(self._entity, _P4EntityBase): + ent = type(self._entity)( + self._entity.name) # create new instance of same entity + else: + ent = type(self._entity)() + msg = getattr(entity, self._entity._entity_type.name) + ent._from_msg(msg) + # neither of these should be needed + # ent._update_msg() + # ent._entry.CopyFrom(msg) + return ent + + if function is None: + return _EntryIterator(self, iterator) + for x in _EntryIterator(self, iterator): + function(x) + + +class _P4EntityBase(_EntityBase): + """ + Basic P4 entity. + """ + + def __init__(self, p4_type, entity_type, p4runtime_cls, name=None, + modify_only=False): + super().__init__(entity_type, p4runtime_cls, modify_only) + self._p4_type = p4_type + if name is None: + raise UserError( + f"Please provide name for {p4_type.pretty_name}") + self.name = name + self._info = P4Objects(p4_type)[name] + self.id = self._info.id + + def __dir__(self): + return super().__dir__() + ["name", "id", "info"] + + def _from_msg(self, msg): + raise NotImplementedError + + def info(self): + """ + Display P4Info entry for the object. + + :return: P4 info entry + """ + return self._info + + +class ActionProfileMember(_P4EntityBase): + """ + P4 action profile member. + """ + + def __init__(self, action_profile_name=None): + super().__init__( + P4Type.action_profile, P4RuntimeEntity.action_profile_member, + p4runtime_pb2.ActionProfileMember, action_profile_name) + self.member_id = 0 + self.action = None + self._valid_action_ids = self._get_action_set() + self.__doc__ = f""" +An action profile member for '{action_profile_name}' + +Use <self>.info to display the P4Info entry for the action profile. + +Set the member id with <self>.member_id = <expr>. + +To set the action specification <self>.action = <instance of type Action>. +To set the value of action parameters, +use <self>.action['<param name>'] = <expr>. +Type <self>.action? for more details. + + +Typical usage to insert an action profile member: +m = action_profile_member['<action_profile_name>'](action='<action_name>', +member_id=1) +m.action['<p1>'] = ... +... +m.action['<pM>'] = ... +# OR m.action.set(p1=..., ..., pM=...) +m.insert + +For information about how to read members, use <self>.read? 
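+
+For example (illustrative), to iterate over all installed members:
+m = action_profile_member['<action_profile_name>']
+for x in m.read():
+    print(x)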
+""" + self._init = True + + def __dir__(self): + return super().__dir__() + ["member_id", "action"] + + def _get_action_set(self): + t_id = self._info.table_ids[0] + t_name = CONTEXT.get_name_from_id(t_id) + t = CONTEXT.get_table(t_name) + return {action.id for action in t.action_refs} + + def __call__(self, **kwargs): + for name, value in kwargs.items(): + if name == "action" and isinstance(value, str): + value = Action(value) + setattr(self, name, value) + return self + + def __setattr__(self, name, value): + if name[0] == "_" or not self._init: + super().__setattr__(name, value) + return + if name == "name": + raise UserError("Cannot change action profile name") + if name == "member_id": + if not isinstance(value, int): + raise UserError("member_id must be an integer") + if name == "action" and value is not None: + if not isinstance(value, Action): + raise UserError("action must be an instance of Action") + if not self._is_valid_action_id(value._action_id): + raise UserError(f"action '{value.action_name}' is not a valid " + f"action for this action profile") + super().__setattr__(name, value) + + def _is_valid_action_id(self, action_id): + return action_id in self._valid_action_ids + + def _update_msg(self): + self._entry.action_profile_id = self.id + self._entry.member_id = self.member_id + if self.action is not None: + self._entry.action.CopyFrom(self.action.msg()) + + def _from_msg(self, msg): + self.member_id = msg.member_id + if msg.HasField('action'): + action = msg.action + action_name = CONTEXT.get_name_from_id(action.action_id) + self.action = Action(action_name) + self.action._from_msg(action) + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the appropriate fields unset). + + If function is None, returns an iterator. Iterate over it to get all the + members (as ActionProfileMember instances) returned by the + server. Otherwise, function is applied to all the members returned + by the server. + """ + return super().read(function) + + +class GroupMember: + """ + P4 group member. + + A member in an ActionProfileGroup. + Construct with GroupMember(<member_id>, weight=<weight>, watch=<watch>, + watch_port=<watch_port>). + You can set / get attributes member_id (required), weight (default 1), + watch (default 0), watch_port (default ""). 
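+
+    For example (a sketch; the ids, weight and port value are arbitrary):
+    m = GroupMember(1, weight=2)
+    m.watch_port = b"\x00\x01"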
+ """ + + def __init__(self, member_id=None, weight=1, watch=0, watch_port=b""): + if member_id is None: + raise UserError("member_id is required") + self._msg = p4runtime_pb2.ActionProfileGroup.Member() + self._msg.member_id = member_id + self._msg.weight = weight + if watch: + self._msg.watch = watch + if watch_port: + self._msg.watch_port = watch_port + + def __dir__(self): + return ["member_id", "weight", "watch", "watch_port"] + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name == "member_id": + if not isinstance(value, int): + raise UserError("member_id must be an integer") + self._msg.member_id = value + return + if name == "weight": + if not isinstance(value, int): + raise UserError("weight must be an integer") + self._msg.weight = value + return + if name == "watch": + if not isinstance(value, int): + raise UserError("watch must be an integer") + self._msg.watch = value + return + if name == "watch_port": + if not isinstance(value, bytes): + raise UserError("watch_port must be a byte string") + self._msg.watch_port = value + return + super().__setattr__(name, value) + + def __getattr__(self, name): + if name == "member_id": + return self._msg.member_id + if name == "weight": + return self._msg.weight + if name == "watch": + return self._msg.watch + if name == "watch_port": + return self._msg.watch_port + return super().__getattr__(name) + + def __str__(self): + return str(self._msg) + + +class ActionProfileGroup(_P4EntityBase): + """ + P4 action profile group. + """ + + def __init__(self, action_profile_name=None): + super().__init__( + P4Type.action_profile, P4RuntimeEntity.action_profile_group, + p4runtime_pb2.ActionProfileGroup, action_profile_name) + self.group_id = 0 + self.max_size = 0 + self.members = [] + self.__doc__ = f""" +An action profile group for '{action_profile_name}' + +Use <self>.info to display the P4Info entry for the action profile. + +Set the group id with <self>.group_id = <expr>. Default is 0. +Set the max size with <self>.max_size = <expr>. Default is 0. + +Add members to the group with <self>.add(<member_id>, weight=<weight>, watch=<watch>, +watch_port=<watch_port>). +weight, watch and watch port are optional (default to 1, 0 and "" respectively). + +Typical usage to insert an action profile group: +g = action_profile_group['<action_profile_name>'](group_id=1) +g.add(<member id 1>) +g.add(<member id 2>) +# OR g.add(<member id 1>).add(<member id 2>) + +For information about how to read groups, use <self>.read? 
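+
+Once the group is populated, write it with <self>.insert, update it with
+<self>.modify (e.g. after <self>.clear() and re-adding members), and remove it
+with <self>.delete.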
+""" + self._init = True + + def __dir__(self): + return super().__dir__() + ["group_id", "max_size", "members", "add", + "clear"] + + def __call__(self, **kwargs): + for name, value in kwargs.items(): + setattr(self, name, value) + return self + + def __setattr__(self, name, value): + if name[0] == "_" or not self._init: + super().__setattr__(name, value) + return + if name == "name": + raise UserError("Cannot change action profile name") + if name == "group_id": + if not isinstance(value, int): + raise UserError("group_id must be an integer") + if name == "members": + if not isinstance(value, list): + raise UserError("members must be a list of GroupMember objects") + for member in value: + if not isinstance(member, GroupMember): + raise UserError( + "members must be a list of GroupMember objects") + super().__setattr__(name, value) + + def add(self, member_id=None, weight=1, watch=0, watch_port=b""): + """Add a member to the members list.""" + self.members.append(GroupMember(member_id, weight, watch, watch_port)) + return self + + def clear(self): + """Empty members list.""" + self.members = [] + + def _update_msg(self): + self._entry.action_profile_id = self.id + self._entry.group_id = self.group_id + self._entry.max_size = self.max_size + del self._entry.members[:] + for member in self.members: + if not isinstance(member, GroupMember): + raise UserError("members must be a list of GroupMember objects") + m = self._entry.members.add() + m.CopyFrom(member._msg) + + def _from_msg(self, msg): + self.group_id = msg.group_id + self.max_size = msg.max_size + self.members = [] + for member in msg.members: + self.add(member.member_id, member.weight, member.watch, + member.watch_port) + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the appropriate fields unset). + + If function is None, returns an iterator. Iterate over it to get all the + members (as ActionProfileGroup instances) returned by the + server. Otherwise, function is applied to all the groups returned by the + server. + """ + return super().read(function) + + +def _get_action_profile(table_name): + table = CONTEXT.get_table(table_name) + implementation_id = table.implementation_id + if implementation_id == 0: + return None + try: + implementation_name = CONTEXT.get_name_from_id(implementation_id) + except KeyError as ex: + raise InvalidP4InfoError( + f"Invalid implementation_id {implementation_id} for " + f"table '{table_name}'") from ex + ap = CONTEXT.get_obj(P4Type.action_profile, implementation_name) + if ap is None: + raise InvalidP4InfoError( + f"Unknown implementation for table '{table_name}'") + return ap + + +class OneshotAction: + """ + A P4 action in a oneshot action set. + Construct with OneshotAction(<action (Action instance)>, + weight=<weight>, watch=<watch>, watch_port=<watch_port>). + You can set / get attributes action (required), weight (default 1), + watch (default 0), watch_port (default ""). 
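+
+    For example (a sketch, assuming 'a' is an Action instance valid for the
+    target table):
+    osa = OneshotAction(a, weight=2)
+    msg = osa.msg()    # p4runtime_pb2.ActionProfileAction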
+ """ + + def __init__(self, action=None, weight=1, watch=0, watch_port=b""): + if action is None: + raise UserError("action is required") + self.action = action + self.weight = weight + self.watch = watch + self.watch_port = watch_port + + def __dir__(self): + return ["action", "weight", "watch", "watch_port", "msg"] + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name == "action": + if not isinstance(value, Action): + raise UserError("action must be an instance of Action") + elif name == "weight": + if not isinstance(value, int): + raise UserError("weight must be an integer") + elif name == "watch": + if not isinstance(value, int): + raise UserError("watch must be an integer") + elif name == "watch_port": + print(type(value), value) + if not isinstance(value, bytes): + raise UserError("watch_port must be a byte string") + super().__setattr__(name, value) + + def msg(self): + """ + Create an one shot action message. + + :return: one shot action message + """ + msg = p4runtime_pb2.ActionProfileAction() + msg.action.CopyFrom(self.action.msg()) + msg.weight = self.weight + if self.watch: + msg.watch = self.watch + if self.watch_port: + msg.watch_port = self.watch_port + return msg + + def __str__(self): + return str(self.msg()) + + +class Oneshot: + """ + One shot action set. + """ + + def __init__(self, table_name=None): + self._init = False + if table_name is None: + raise UserError("Please provide table name") + self.table_name = table_name + self.actions = [] + self._table_info = P4Objects(P4Type.table)[table_name] + ap = _get_action_profile(table_name) + if not ap: + raise UserError("Cannot create Oneshot instance for a direct table") + if not ap.with_selector: + raise UserError( + "Cannot create Oneshot instance for a table " + "with an action profile without selector") + self.__doc__ = f""" +A "oneshot" action set for table '{self.table_name}'. + +To add an action to the set, use <self>.add(<Action instance>). +You can also access the set of actions with <self>.actions (which is a Python list). +""" + self._init = True + + def __dir__(self): + return ["table_name", "actions", "add", "msg"] + + def __setattr__(self, name, value): + if name[0] == "_" or not self._init: + super().__setattr__(name, value) + return + if name == "table_name": + raise UserError("Cannot change table name") + if name == "actions": + if not isinstance(value, list): + raise UserError( + "actions must be a list of OneshotAction objects") + for member in value: + if not isinstance(member, OneshotAction): + raise UserError( + "actions must be a list of OneshotAction objects") + if not self._is_valid_action_id(value.action._action_id): + raise UserError( + f"action '{value.action.action_name}' is not a valid " + f"action for table {self.table_name}") + super().__setattr__(name, value) + + def _is_valid_action_id(self, action_id): + for action_ref in self._table_info.action_refs: + if action_id == action_ref.id: + return True + return False + + def add(self, action=None, weight=1, watch=0, watch_port=b""): + """ + Add an action to the oneshot action set. + + :param action: action object + :param weight: weight (integer) + :param watch: watch (integer) + :param watch_port: watch port + :return: + """ + self.actions.append(OneshotAction(action, weight, watch, watch_port)) + return self + + def msg(self): + """ + Create an action profile message. 
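+
+        For example (a sketch; table and action names are placeholders):
+        one = Oneshot('<table_name>')
+        one.add(Action('<action_name>'), weight=1)
+        aps = one.msg()    # p4runtime_pb2.ActionProfileActionSet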
+ + :return: action profile message + """ + msg = p4runtime_pb2.ActionProfileActionSet() + msg.action_profile_actions.extend( + [action.msg() for action in self.actions]) + return msg + + def _from_msg(self, msg): + for action in msg.action_profile_actions: + action_name = CONTEXT.get_name_from_id(action.action.action_id) + a = Action(action_name) + a._from_msg(action.action) + self.actions.append(OneshotAction(a, action.weight, action.watch, + action.watch_port)) + + def __str__(self): + return str(self.msg()) + + +class _CounterData: + """ + P4 counter data. + """ + + @staticmethod + def attrs_for_counter_type(counter_type): + """ + Return counter attributes. + + :param counter_type: P4 counter type + :return: list of counter attributes + """ + attrs = [] + if counter_type in {p4info_pb2.CounterSpec.BYTES, + p4info_pb2.CounterSpec.BOTH}: + attrs.append("byte_count") + if counter_type in {p4info_pb2.CounterSpec.PACKETS, + p4info_pb2.CounterSpec.BOTH}: + attrs.append("packet_count") + return attrs + + def __init__(self, counter_name, counter_type): + self._counter_name = counter_name + self._counter_type = counter_type + self._msg = p4runtime_pb2.CounterData() + self._attrs = _CounterData.attrs_for_counter_type(counter_type) + + def __dir__(self): + return self._attrs + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name not in self._attrs: + type_name = p4info_pb2._COUNTERSPEC_UNIT.values_by_number[ + self._counter_type].name + raise UserError( + f"Counter '{self._counter_name}' is of type '{type_name}', " + f"you cannot set '{name}'") + if not isinstance(value, int): + raise UserError(f"{name} must be an integer") + setattr(self._msg, name, value) + + def __getattr__(self, name): + if name in ("byte_count", "packet_count"): + return getattr(self._msg, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no " + f"attribute '{name}'") + + def msg(self): + """ + Create a counter data message. + + :return: counter data message + """ + return self._msg + + def _from_msg(self, msg): + self._msg.CopyFrom(msg) + + def __str__(self): + return str(self.msg()) + + @classmethod + def set_count(cls, instance, counter_name, counter_type, name, value): + """ + Set the value of a certain counter. + + :param instance: counter instance + :param counter_name: counter name + :param counter_type: counter type + :param name: counter attribute name + :param value: counter attribute value + :return: updated counter instance + """ + if instance is None: + d = cls(counter_name, counter_type) + else: + d = instance + setattr(d, name, value) + return d + + @classmethod + def get_count(cls, instance, counter_name, counter_type, name): + """ + Get the value of a certain counter. + + :param instance: + :param counter_name: counter name + :param counter_type: counter type + :param name: counter attribute name + :return: counter name and value + """ + if instance is None: + d = cls(counter_name, counter_type) + else: + d = instance + r = getattr(d, name) + return d, r + + +class _MeterConfig: + """ + P4 meter configuration. + """ + + @staticmethod + def attrs(): + """ + Get the attributes in this scope. 
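+        (namely cir, cburst, pir and pburst, mirroring p4runtime_pb2.MeterConfig)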
+ + :return: list of scope attributes + """ + return ["cir", "cburst", "pir", "pburst"] + + def __init__(self, meter_name, meter_type): + self._meter_name = meter_name + self._meter_type = meter_type + self._msg = p4runtime_pb2.MeterConfig() + self._attrs = _MeterConfig.attrs() + + def __dir__(self): + return self._attrs + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name in self._attrs: + if not isinstance(value, int): + raise UserError(f"{name} must be an integer") + setattr(self._msg, name, value) + + def __getattr__(self, name): + if name in self._attrs: + return getattr(self._msg, name) + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def msg(self): + """ + Create a meter config message. + + :return: meter config message + """ + return self._msg + + def _from_msg(self, msg): + self._msg.CopyFrom(msg) + + def __str__(self): + return str(self.msg()) + + @classmethod + def set_param(cls, instance, meter_name, meter_type, name, value): + """ + Set the value of a certain meter parameter. + + :param instance: meter instance + :param meter_name: meter name + :param meter_type: meter type + :param name: meter parameter name + :param value: meter parameter value + :return: updated meter + """ + if instance is None: + d = cls(meter_name, meter_type) + else: + d = instance + setattr(d, name, value) + return d + + @classmethod + def get_param(cls, instance, meter_name, meter_type, name): + """ + Get the value of a certain meter parameter. + + :param instance: meter instance + :param meter_name: meter name + :param meter_type: meter type + :param name: meter parameter name + :return: meter with parameter + """ + if instance is None: + d = cls(meter_name, meter_type) + else: + d = instance + r = getattr(d, name) + return d, r + + +class _IdleTimeout: + """ + P4 idle timeout. + """ + + @staticmethod + def attrs(): + """ + Get the attributes in this scope. + + :return: list of scope attributes + """ + return ["elapsed_ns"] + + def __init__(self): + self._msg = p4runtime_pb2.TableEntry.IdleTimeout() + self._attrs = _IdleTimeout.attrs() + + def __dir__(self): + return self._attrs + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name in self._attrs: + if not isinstance(value, int): + raise UserError(f"{name} must be an integer") + setattr(self._msg, name, value) + + def __getattr__(self, name): + if name in self._attrs: + return getattr(self._msg, name) + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def msg(self): + """ + Create an idle timeout message. + + :return: idle timeout message + """ + return self._msg + + def _from_msg(self, msg): + self._msg.CopyFrom(msg) + + def __str__(self): + return str(self.msg()) + + @classmethod + def set_param(cls, instance, name, value): + """ + Set the value of a certain idle timeout parameter. + + :param instance: idle timeout instance + :param name: idle timeout parameter name + :param value: idle timeout parameter value + :return: updated idle timeout instance + """ + if instance is None: + d = cls() + else: + d = instance + setattr(d, name, value) + return d + + @classmethod + def get_param(cls, instance, name): + """ + Set the value of a certain idle timeout parameter. 
+ + :param instance: idle timeout instance + :param name: idle timeout parameter name + :return: idle timeout instance with parameter + """ + if instance is None: + d = cls() + else: + d = instance + r = getattr(d, name) + return d, r + + +class TableEntry(_P4EntityBase): + """ + P4 table entry. + """ + + @enum.unique + class _ActionSpecType(enum.Enum): + NONE = 0 + DIRECT_ACTION = 1 + MEMBER_ID = 2 + GROUP_ID = 3 + ONESHOT = 4 + + @classmethod + def _action_spec_name_to_type(cls, name): + return { + "action": cls._ActionSpecType.DIRECT_ACTION, + "member_id": cls._ActionSpecType.MEMBER_ID, + "group_id": cls._ActionSpecType.GROUP_ID, + "oneshot": cls._ActionSpecType.ONESHOT, + }.get(name, None) + + def __init__(self, table_name=None): + super().__init__( + P4Type.table, P4RuntimeEntity.table_entry, + p4runtime_pb2.TableEntry, table_name) + self.match = MatchKey(table_name, self._info.match_fields) + self._action_spec_type = self._ActionSpecType.NONE + self._action_spec = None + self.action: Action + self.member_id = -1 + self.group_id = -1 + self.oneshot = None + self.priority = 0 + self.is_default = False + ap = _get_action_profile(table_name) + if ap is None: + self._support_members = False + self._support_groups = False + else: + self._support_members = True + self._support_groups = ap.with_selector + self._direct_counter = None + self._direct_meter = None + for res_id in self._info.direct_resource_ids: + prefix = (res_id & 0xff000000) >> 24 + if prefix == p4info_pb2.P4Ids.DIRECT_COUNTER: + self._direct_counter = CONTEXT.get_obj_by_id(res_id) + elif prefix == p4info_pb2.P4Ids.DIRECT_METER: + self._direct_meter = CONTEXT.get_obj_by_id(res_id) + self._counter_data = None + self._meter_config = None + self.idle_timeout_ns = 0 + self._time_since_last_hit = None + self._idle_timeout_behavior = None + table = CONTEXT.get_table(table_name) + if table.idle_timeout_behavior > 0: + self._idle_timeout_behavior = table.idle_timeout_behavior + self.metadata = b"" + self.__doc__ = f""" +An entry for table '{table_name}' + +Use <self>.info to display the P4Info entry for this table. + +To set the match key, use <self>.match['<field name>'] = <expr>. +Type <self>.match? for more details. +""" + if self._direct_counter is not None: + self.__doc__ += """ +To set the counter spec, use <self>.counter_data.byte_count and/or <self>.counter_data.packet_count. +To unset it, use <self>.counter_data = None or <self>.clear_counter_data(). +""" + if self._direct_meter is not None: + self.__doc__ += """ +To access the meter config, use <self>.meter_config.<cir|cburst|pir|pburst>. +To unset it, use <self>.meter_config = None or <self>.clear_meter_config(). +""" + if ap is None: + self.__doc__ += """ +To set the action specification (this is a direct table): +<self>.action = <instance of type Action>. +To set the value of action parameters, use <self>.action['<param name>'] = <expr>. +Type <self>.action? for more details. +""" + if self._support_members: + self.__doc__ += """ +Access the member_id with <self>.member_id. +""" + if self._support_groups: + self.__doc__ += """ +Or access the group_id with <self>.group_id. +""" + if self._idle_timeout_behavior is not None: + self.__doc__ += """ +To access the time this entry was last hit, use <self>.time_since_last_hit.elapsed_ns. +To unset it, use <self>.time_since_last_hit = None or <self>.clear_time_since_last_hit(). +""" + self.__doc__ += """ +To set the priority, use <self>.priority = <expr>. + +To mark the entry as default, use <self>.is_default = True. 
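+(Setting is_default to True clears any match key already set on the entry.)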
+ +To add an idle timeout to the entry, use <self>.idle_timeout_ns = <expr>. + +To add metadata to the entry, use <self>.metadata = <expr>. +""" + if ap is None: + self.__doc__ += """ +Typical usage to insert a table entry: +t = table_entry['<table_name>'](action='<action_name>') +t.match['<f1>'] = ... +... +t.match['<fN>'] = ... +# OR t.match.set(f1=..., ..., fN=...) +t.action['<p1>'] = ... +... +t.action['<pM>'] = ... +# OR t.action.set(p1=..., ..., pM=...) +t.insert + +Typical usage to set the default entry: +t = table_entry['<table_name>'](is_default=True) +t.action['<p1>'] = ... +... +t.action['<pM>'] = ... +# OR t.action.set(p1=..., ..., pM=...) +t.modify +""" + else: + self.__doc__ += """ +Typical usage to insert a table entry: +t = table_entry['<table_name>'] +t.match['<f1>'] = ... +... +t.match['<fN>'] = ... +# OR t.match.set(f1=..., ..., fN=...) +t.member_id = <expr> +""" + self.__doc__ += """ +For information about how to read table entries, use <self>.read? +""" + + self._init = True + + def __dir__(self): + d = super().__dir__() + [ + "match", "priority", "is_default", "idle_timeout_ns", "metadata", + "clear_action", "clear_match", "clear_counter_data", + "clear_meter_config", + "clear_time_since_last_hit"] + if self._support_groups: + d.extend(["member_id", "group_id", "oneshot"]) + elif self._support_members: + d.append("member_id") + else: + d.append("action") + if self._direct_counter is not None: + d.append("counter_data") + if self._direct_meter is not None: + d.append("meter_config") + if self._idle_timeout_behavior is not None: + d.append("time_since_last_hit") + return d + + def __call__(self, **kwargs): + for name, value in kwargs.items(): + if name == "action" and isinstance(value, str): + value = Action(value) + setattr(self, name, value) + return self + + def _action_spec_set_member(self, member_id): + if isinstance(member_id, type(None)): + if self._action_spec_type == self._ActionSpecType.MEMBER_ID: + super().__setattr__("_action_spec_type", + self._ActionSpecType.NONE) + super().__setattr__("_action_spec", None) + return + if not isinstance(member_id, int): + raise UserError("member_id must be an integer") + if not self._support_members: + raise UserError("Table does not have an action profile and " + "therefore does not support members") + super().__setattr__("_action_spec_type", self._ActionSpecType.MEMBER_ID) + super().__setattr__("_action_spec", member_id) + + def _action_spec_set_group(self, group_id): + if isinstance(group_id, type(None)): + if self._action_spec_type == self._ActionSpecType.GROUP_ID: + super().__setattr__("_action_spec_type", + self._ActionSpecType.NONE) + super().__setattr__("_action_spec", None) + return + if not isinstance(group_id, int): + raise UserError("group_id must be an integer") + if not self._support_groups: + raise UserError( + "Table does not have an action profile with selector " + "and therefore does not support groups") + super().__setattr__("_action_spec_type", self._ActionSpecType.GROUP_ID) + super().__setattr__("_action_spec", group_id) + + def _action_spec_set_action(self, action): + if isinstance(action, type(None)): + if self._action_spec_type == self._ActionSpecType.DIRECT_ACTION: + super().__setattr__("_action_spec_type", + self._ActionSpecType.NONE) + super().__setattr__("_action_spec", None) + return + if not isinstance(action, Action): + raise UserError("action must be an instance of Action") + if self._info.implementation_id != 0: + raise UserError( + "Table has an implementation and therefore " + "does not 
support direct actions (P4Runtime 1.0 doesn't " + "support writing the default action for indirect tables") + if not self._is_valid_action_id(action._action_id): + raise UserError(f"action '{action.action_name}' is not a valid " + f"action for this table") + super().__setattr__("_action_spec_type", + self._ActionSpecType.DIRECT_ACTION) + super().__setattr__("_action_spec", action) + + def _action_spec_set_oneshot(self, oneshot): + if isinstance(oneshot, type(None)): + if self._action_spec_type == self._ActionSpecType.ONESHOT: + super().__setattr__("_action_spec_type", + self._ActionSpecType.NONE) + super().__setattr__("_action_spec", None) + return + if not isinstance(oneshot, Oneshot): + raise UserError("oneshot must be an instance of Oneshot") + if not self._support_groups: + raise UserError( + "Table does not have an action profile with selector " + "and therefore does not support oneshot programming") + if self.name != oneshot.table_name: + raise UserError( + "This Oneshot instance was not created for this table") + super().__setattr__("_action_spec_type", self._ActionSpecType.ONESHOT) + super().__setattr__("_action_spec", oneshot) + + def __setattr__(self, name, value): + if name[0] == "_" or not self._init: + super().__setattr__(name, value) + return + if name == "name": + raise UserError("Cannot change table name") + if name == "priority": + if not isinstance(value, int): + raise UserError("priority must be an integer") + if name == "match" and not isinstance(value, MatchKey): + raise UserError("match must be an instance of MatchKey") + if name == "is_default": + if not isinstance(value, bool): + raise UserError("is_default must be a boolean") + # TODO: handle other cases # pylint: disable=W0511 + # is_default is set to True)? + if value is True and self.match._count() > 0: + print("Clearing match key because entry is now default") + self.match.clear() + if name == "member_id": + self._action_spec_set_member(value) + return + if name == "group_id": + self._action_spec_set_group(value) + return + if name == "oneshot": + self._action_spec_set_oneshot(value) + if name == "action" and value is not None: + self._action_spec_set_action(value) + return + if name == "counter_data": + if self._direct_counter is None: + raise UserError("Table has no direct counter") + if value is None: + self._counter_data = None + return + raise UserError("Cannot set 'counter_data' directly") + if name == "meter_config": + if self._direct_meter is None: + raise UserError("Table has no direct meter") + if value is None: + self._meter_config = None + return + raise UserError("Cannot set 'meter_config' directly") + if name == "idle_timeout_ns": + if not isinstance(value, int): + raise UserError("idle_timeout_ns must be an integer") + if name == "time_since_last_hit": + if self._idle_timeout_behavior is None: + raise UserError("Table has no idle timeouts") + if value is None: + self._time_since_last_hit = None + return + raise UserError("Cannot set 'time_since_last_hit' directly") + if name == "metadata": + if not isinstance(value, bytes): + raise UserError("metadata must be a byte string") + super().__setattr__(name, value) + + def __getattr__(self, name): + if name == "counter_data": + if self._direct_counter is None: + raise UserError("Table has no direct counter") + if self._counter_data is None: + self._counter_data = _CounterData( + self._direct_counter.preamble.name, + self._direct_counter.spec.unit) + return self._counter_data + if name == "meter_config": + if self._direct_meter is None: + raise 
UserError("Table has no direct meter") + if self._meter_config is None: + self._meter_config = _MeterConfig( + self._direct_meter.preamble.name, + self._direct_meter.spec.unit) + return self._meter_config + if name == "time_since_last_hit": + if self._idle_timeout_behavior is None: + raise UserError("Table has no idle timeouts") + if self._time_since_last_hit is None: + self._time_since_last_hit = _IdleTimeout() + return self._time_since_last_hit + + t = self._action_spec_name_to_type(name) + if t is None: + return super().__getattr__(name) + if self._action_spec_type == t: + return self._action_spec + if t == self._ActionSpecType.ONESHOT: + self._action_spec_type = self._ActionSpecType.ONESHOT + self._action_spec = Oneshot(self.name) + return self._action_spec + return None + + def _is_valid_action_id(self, action_id): + for action_ref in self._info.action_refs: + if action_id == action_ref.id: + return True + return False + + def _from_msg(self, msg): + self.priority = msg.priority + self.is_default = msg.is_default_action + self.idle_timeout_ns = msg.idle_timeout_ns + self.metadata = msg.metadata + for mf in msg.match: + mf_name = CONTEXT.get_mf_name(self.name, mf.field_id) + self.match._mk[mf_name] = mf + if msg.action.HasField('action'): + action = msg.action.action + action_name = CONTEXT.get_name_from_id(action.action_id) + self.action = Action(action_name) + self.action._from_msg(action) + elif msg.action.HasField('action_profile_member_id'): + self.member_id = msg.action.action_profile_member_id + elif msg.action.HasField('action_profile_group_id'): + self.group_id = msg.action.action_profile_group_id + elif msg.action.HasField('action_profile_action_set'): + self.oneshot = Oneshot(self.name) + self.oneshot._from_msg(msg.action.action_profile_action_set) + if msg.HasField('counter_data'): + self._counter_data = _CounterData( + self._direct_counter.preamble.name, + self._direct_counter.spec.unit) + self._counter_data._from_msg(msg.counter_data) + else: + self._counter_data = None + if msg.HasField('meter_config'): + self._meter_config = _MeterConfig( + self._direct_meter.preamble.name, self._direct_meter.spec.unit) + self._meter_config._from_msg(msg.meter_config) + else: + self._meter_config = None + if msg.HasField("time_since_last_hit"): + self._time_since_last_hit = _IdleTimeout() + self._time_since_last_hit._from_msg(msg.time_since_last_hit) + else: + self._time_since_last_hit = None + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the appropriate fields unset). + If function is None, returns an iterator. Iterate over it to get all the + table entries (TableEntry instances) returned by the server. Otherwise, + function is applied to all the table entries returned by the server. 
+ + For example: + for te in <self>.read(): + print(te) + The above code is equivalent to the following one-liner: + <self>.read(lambda te: print(te)) + + To delete all the entries from a table, simply use: + table_entry['<table_name>'].read(function=lambda x: x.delete()) + """ + return super().read(function) + + def _update_msg(self): + entry = p4runtime_pb2.TableEntry() + entry.table_id = self.id + entry.match.extend(self.match._mk.values()) + entry.priority = self.priority + entry.is_default_action = self.is_default + entry.idle_timeout_ns = self.idle_timeout_ns + entry.metadata = self.metadata + if self._action_spec_type == self._ActionSpecType.DIRECT_ACTION: + entry.action.action.CopyFrom(self._action_spec.msg()) + elif self._action_spec_type == self._ActionSpecType.MEMBER_ID: + entry.action.action_profile_member_id = self._action_spec + elif self._action_spec_type == self._ActionSpecType.GROUP_ID: + entry.action.action_profile_group_id = self._action_spec + elif self._action_spec_type == self._ActionSpecType.ONESHOT: + entry.action.action_profile_action_set.CopyFrom( + self._action_spec.msg()) + if self._counter_data is None: + entry.ClearField('counter_data') + else: + entry.counter_data.CopyFrom(self._counter_data.msg()) + if self._meter_config is None: + entry.ClearField('meter_config') + else: + entry.meter_config.CopyFrom(self._meter_config.msg()) + if self._time_since_last_hit is None: + entry.ClearField("time_since_last_hit") + else: + entry.time_since_last_hit.CopyFrom(self._time_since_last_hit.msg()) + self._entry = entry + + def _validate_msg(self): + if self.is_default and self.match._count() > 0: + raise UserError("Match key must be empty for default entry, " + "use <self>.is_default = False " + "or <self>.match.clear " + "(whichever one is appropriate)") + + def clear_action(self): + """Clears the action spec for the TableEntry.""" + super().__setattr__("_action_spec_type", self._ActionSpecType.NONE) + super().__setattr__("_action_spec", None) + + def clear_match(self): + """Clears the match spec for the TableEntry.""" + self.match.clear() + + def clear_counter_data(self): + """Clear all counter data, same as <self>.counter_data = None""" + self._counter_data = None + + def clear_meter_config(self): + """Clear the meter config, same as <self>.meter_config = None""" + self._meter_config = None + + def clear_time_since_last_hit(self): + """Clear the idle timeout, same as <self>.time_since_last_hit = None""" + self._time_since_last_hit = None + + +class _CounterEntryBase(_P4EntityBase): + """ + Basic P4 counter entry. 
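+
+    Shared base for CounterEntry and DirectCounterEntry: it exposes byte_count
+    and packet_count (depending on the counter unit) backed by a
+    p4runtime_pb2.CounterData message, plus clear_data() to unset them.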
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._counter_type = self._info.spec.unit + self.packet_count = -1 + self.byte_count = -1 + self._data = None + + def __dir__(self): + return super().__dir__() + _CounterData.attrs_for_counter_type( + self._counter_type) + [ + "clear_data"] + + def __call__(self, **kwargs): + for name, value in kwargs.items(): + setattr(self, name, value) + return self + + def __setattr__(self, name, value): + if name[0] == "_" or not self._init: + super().__setattr__(name, value) + return + if name == "name": + raise UserError("Cannot change counter name") + if name in ("byte_count", "packet_count"): + self._data = _CounterData.set_count( + self._data, self.name, self._counter_type, name, value) + return + if name == "data": + if value is None: + self._data = None + return + raise UserError("Cannot set 'data' directly") + super().__setattr__(name, value) + + def __getattr__(self, name): + if name in ("byte_count", "packet_count"): + self._data, r = _CounterData.get_count( + self._data, self.name, self._counter_type, name) + return r + if name == "data": + if self._data is None: + self._data = _CounterData(self.name, self._counter_type) + return self._data + return super().__getattr__(name) + + def _from_msg(self, msg): + self._entry.CopyFrom(msg) + if msg.HasField('data'): + self._data = _CounterData(self.name, self._counter_type) + self._data._from_msg(msg.data) + else: + self._data = None + + def _update_msg(self): + if self._data is None: + self._entry.ClearField('data') + else: + self._entry.data.CopyFrom(self._data.msg()) + + def clear_data(self): + """Clear all counter data, same as <self>.data = None""" + self._data = None + + +class CounterEntry(_CounterEntryBase): + """ + P4 counter entry. + """ + + def __init__(self, counter_name=None): + super().__init__( + P4Type.counter, P4RuntimeEntity.counter_entry, + p4runtime_pb2.CounterEntry, counter_name, + modify_only=True) + self._entry.counter_id = self.id + self.index = -1 + self.__doc__ = f""" +An entry for counter '{counter_name}' + +Use <self>.info to display the P4Info entry for this counter. + +Set the index with <self>.index = <expr>. +To reset it (e.g. for wildcard read), set it to None. + +Access byte count and packet count with <self>.byte_count / <self>.packet_count. + +To read from the counter, use <self>.read +To write to the counter, use <self>.modify +""" + self._init = True + + def __dir__(self): + return super().__dir__() + ["index", "data"] + + def __setattr__(self, name, value): + if name == "index": + if value is None: + self._entry.ClearField('index') + return + if not isinstance(value, int): + raise UserError("index must be an integer") + self._entry.index.index = value + return + super().__setattr__(name, value) + + def __getattr__(self, name): + if name == "index": + return self._entry.index.index + return super().__getattr__(name) + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the index unset). + If function is None, returns an iterator. Iterate over it to get all the + counter entries (CounterEntry instances) returned by the + server. Otherwise, function is applied to all the counter entries + returned by the server. + + For example: + for c in <self>.read(): + print(c) + The above code is equivalent to the following one-liner: + <self>.read(lambda c: print(c)) + """ + return super().read(function) + + +class DirectCounterEntry(_CounterEntryBase): + """ + Direct P4 counter entry. 
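+
+    Unlike CounterEntry, entries are addressed by a TableEntry of the table the
+    counter is attached to rather than by an index. A sketch (names and values
+    are placeholders):
+    dce = DirectCounterEntry('<direct_counter_name>')
+    dce.table_entry.match['<field_name>'] = '<value>'
+    dce.read(lambda c: print(c))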
+ """ + + def __init__(self, direct_counter_name=None): + super().__init__( + P4Type.direct_counter, P4RuntimeEntity.direct_counter_entry, + p4runtime_pb2.DirectCounterEntry, direct_counter_name, + modify_only=True) + self._direct_table_id = self._info.direct_table_id + try: + self._direct_table_name = CONTEXT.get_name_from_id( + self._direct_table_id) + except KeyError as ex: + raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} " + f"is not a valid table id") from ex + self._table_entry = TableEntry(self._direct_table_name) + self.__doc__ = f""" +An entry for direct counter '{direct_counter_name}' + +Use <self>.info to display the P4Info entry for this direct counter. + +Set the table_entry with <self>.table_entry = <TableEntry instance>. +The TableEntry instance must be for the table to which the direct counter is +attached. +To reset it (e.g. for wildcard read), set it to None. It is the same as: +<self>.table_entry = TableEntry({self._direct_table_name}) + +Access byte count and packet count with <self>.byte_count / <self>.packet_count. + +To read from the counter, use <self>.read +To write to the counter, use <self>.modify +""" + self._init = True + + def __dir__(self): + return super().__dir__() + ["table_entry"] + + def __setattr__(self, name, value): + if name == "index": + raise UserError("Direct counters are not index-based") + if name == "table_entry": + if value is None: + self._table_entry = TableEntry(self._direct_table_name) + return + if not isinstance(value, TableEntry): + raise UserError("table_entry must be an instance of TableEntry") + if value.name != self._direct_table_name: + raise UserError(f"This DirectCounterEntry is for " + f"table '{self._direct_table_name}'") + self._table_entry = value + return + super().__setattr__(name, value) + + def __getattr__(self, name): + if name == "index": + raise UserError("Direct counters are not index-based") + if name == "table_entry": + return self._table_entry + return super().__getattr__(name) + + def _update_msg(self): + super()._update_msg() + if self._table_entry is None: + self._entry.ClearField('table_entry') + else: + self._entry.table_entry.CopyFrom(self._table_entry.msg()) + + def _from_msg(self, msg): + super()._from_msg(msg) + if msg.HasField('table_entry'): + self._table_entry._from_msg(msg.table_entry) + else: + self._table_entry = None + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the index unset). + If function is None, returns an iterator. Iterate over it to get all the + direct counter entries (DirectCounterEntry instances) returned by the + server. Otherwise, function is applied to all the direct counter entries + returned by the server. + + For example: + for c in <self>.read(): + print(c) + The above code is equivalent to the following one-liner: + <self>.read(lambda c: print(c)) + """ + return super().read(function) + + +class _MeterEntryBase(_P4EntityBase): + """ + Basic P4 meter entry. 
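+
+    Shared base for MeterEntry and DirectMeterEntry: it exposes the rate and
+    burst parameters cir, cburst, pir and pburst backed by a
+    p4runtime_pb2.MeterConfig message, plus clear_config() to unset them.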
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._meter_type = self._info.spec.unit + self.index = -1 + self.cir = -1 + self.cburst = -1 + self.pir = -1 + self.pburst = -1 + self._config = None + + def __dir__(self): + return super().__dir__() + _MeterConfig.attrs() + ["clear_config"] + + def __call__(self, **kwargs): + for name, value in kwargs.items(): + setattr(self, name, value) + return self + + def __setattr__(self, name, value): + if name[0] == "_" or not self._init: + super().__setattr__(name, value) + return + if name == "name": + raise UserError("Cannot change meter name") + if name in _MeterConfig.attrs(): + self._config = _MeterConfig.set_param( + self._config, self.name, self._meter_type, name, value) + return + if name == "config": + if value is None: + self._config = None + return + raise UserError("Cannot set 'config' directly") + super().__setattr__(name, value) + + def __getattr__(self, name): + if name in _MeterConfig.attrs(): + self._config, r = _MeterConfig.get_param( + self._config, self.name, self._meter_type, name) + return r + if name == "config": + if self._config is None: + self._config = _MeterConfig(self.name, self._meter_type) + return self._config + return super().__getattr__(name) + + def _from_msg(self, msg): + self._entry.CopyFrom(msg) + if msg.HasField('config'): + self._config = _MeterConfig(self.name, self._meter_type) + self._config._from_msg(msg.config) + else: + self._config = None + + def _update_msg(self): + if self._config is None: + self._entry.ClearField('config') + else: + self._entry.config.CopyFrom(self._config.msg()) + + def clear_config(self): + """Clear the meter config, same as <self>.config = None""" + self._config = None + + +class MeterEntry(_MeterEntryBase): + """ + P4 meter entry. + """ + + def __init__(self, meter_name=None): + super().__init__( + P4Type.meter, P4RuntimeEntity.meter_entry, + p4runtime_pb2.MeterEntry, meter_name, + modify_only=True) + self._entry.meter_id = self.id + self.__doc__ = f""" +An entry for meter '{meter_name}' + +Use <self>.info to display the P4Info entry for this meter. + +Set the index with <self>.index = <expr>. +To reset it (e.g. for wildcard read), set it to None. + +Access meter rates and burst sizes with: +<self>.cir +<self>.cburst +<self>.pir +<self>.pburst + +To read from the meter, use <self>.read +To write to the meter, use <self>.modify +""" + self._init = True + + def __dir__(self): + return super().__dir__() + ["index", "config"] + + def __setattr__(self, name, value): + if name == "index": + if value is None: + self._entry.ClearField('index') + return + if not isinstance(value, int): + raise UserError("index must be an integer") + self._entry.index.index = value + return + super().__setattr__(name, value) + + def __getattr__(self, name): + if name == "index": + return self._entry.index.index + return super().__getattr__(name) + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the index unset). + If function is None, returns an iterator. Iterate over it to get all the + meter entries (MeterEntry instances) returned by the + server. Otherwise, function is applied to all the meter entries + returned by the server. + + For example: + for c in <self>.read(): + print(c) + The above code is equivalent to the following one-liner: + <self>.read(lambda c: print(c)) + """ + return super().read(function) + + +class DirectMeterEntry(_MeterEntryBase): + """ + Direct P4 meter entry. 
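+
+    Like DirectCounterEntry, entries are addressed by a TableEntry of the table
+    the meter is attached to rather than by an index. A sketch (names and
+    values are placeholders):
+    dme = DirectMeterEntry('<direct_meter_name>')
+    dme.table_entry.match['<field_name>'] = '<value>'
+    dme.cir = 1000
+    dme.modify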
+ """ + + def __init__(self, direct_meter_name=None): + super().__init__( + P4Type.direct_meter, P4RuntimeEntity.direct_meter_entry, + p4runtime_pb2.DirectMeterEntry, direct_meter_name, + modify_only=True) + self._direct_table_id = self._info.direct_table_id + try: + self._direct_table_name = CONTEXT.get_name_from_id( + self._direct_table_id) + except KeyError as ex: + raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} " + f"is not a valid table id") from ex + self._table_entry = TableEntry(self._direct_table_name) + self.__doc__ = f""" +An entry for direct meter '{direct_meter_name}' + +Use <self>.info to display the P4Info entry for this direct meter. + +Set the table_entry with <self>.table_entry = <TableEntry instance>. +The TableEntry instance must be for the table to which the direct meter is attached. +To reset it (e.g. for wildcard read), set it to None. It is the same as: +<self>.table_entry = TableEntry({self._direct_table_name}) + +Access meter rates and burst sizes with: +<self>.cir +<self>.cburst +<self>.pir +<self>.pburst + +To read from the meter, use <self>.read +To write to the meter, use <self>.modify +""" + self._init = True + + def __dir__(self): + return super().__dir__() + ["table_entry"] + + def __setattr__(self, name, value): + if name == "index": + raise UserError("Direct meters are not index-based") + if name == "table_entry": + if value is None: + self._table_entry = TableEntry(self._direct_table_name) + return + if not isinstance(value, TableEntry): + raise UserError("table_entry must be an instance of TableEntry") + if value.name != self._direct_table_name: + raise UserError(f"This DirectMeterEntry is for " + f"table '{self._direct_table_name}'") + self._table_entry = value + return + super().__setattr__(name, value) + + def __getattr__(self, name): + if name == "index": + raise UserError("Direct meters are not index-based") + if name == "table_entry": + return self._table_entry + return super().__getattr__(name) + + def _update_msg(self): + super()._update_msg() + if self._table_entry is None: + self._entry.ClearField('table_entry') + else: + self._entry.table_entry.CopyFrom(self._table_entry.msg()) + + def _from_msg(self, msg): + super()._from_msg(msg) + if msg.HasField('table_entry'): + self._table_entry._from_msg(msg.table_entry) + else: + self._table_entry = None + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the index unset). + If function is None, returns an iterator. Iterate over it to get all the + direct meter entries (DirectMeterEntry instances) returned by the + server. Otherwise, function is applied to all the direct meter entries + returned by the server. + + For example: + for c in <self>.read(): + print(c) + The above code is equivalent to the following one-liner: + <self>.read(lambda c: print(c)) + """ + return super().read(function) + + +class P4RuntimeEntityBuilder: + """ + P4 entity builder. 
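+
+    Provides the bracket syntax used in the docstrings above, e.g.
+    table_entry['<table_name>'] builds a TableEntry and
+    action_profile_member['<action_profile_name>'] builds an ActionProfileMember,
+    assuming those builder instances are created by the surrounding shell code.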
+ """ + + def __init__(self, obj_type, entity_type, entity_cls): + self._obj_type = obj_type + self._names = sorted([name for name, _ in CONTEXT.get_objs(obj_type)]) + self._entity_type = entity_type + self._entity_cls = entity_cls + self.__doc__ = f"""Construct a {entity_cls.__name__} entity +Usage: <var> = {entity_type.name}["<{obj_type.pretty_name} name>"] +This is equivalent to <var>={entity_cls.__name__}(<{obj_type.pretty_name} name>) +Use command '{obj_type.p4info_name}' to see list of {obj_type.pretty_names} +""" + + def _ipython_key_completions_(self): + return self._names + + def __getitem__(self, name): + obj = CONTEXT.get_obj(self._obj_type, name) + if obj is None: + raise UserError( + f"{self._obj_type.pretty_name} '{name}' does not exist") + return self._entity_cls(name) + + def __setitem__(self, name, value): + raise UserError("Operation not allowed") + + def __str__(self): + return f"Construct a {self.entity_cls.__name__} entity" + + +class Replica: + """ + A port "replica" (port number + instance id) used for multicast + and clone session programming. + Construct with Replica(egress_port, instance=<instance>). + You can set / get attributes egress_port (required), instance (default 0). + """ + + def __init__(self, egress_port=None, instance=0): + if egress_port is None: + raise UserError("egress_port is required") + self._msg = p4runtime_pb2.Replica() + self._msg.egress_port = egress_port + self._msg.instance = instance + + def __dir__(self): + return ["port", "egress_port", "instance"] + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name in ("egress_port", "port"): + if not isinstance(value, int): + raise UserError("egress_port must be an integer") + self._msg.egress_port = value + return + if name == "instance": + if not isinstance(value, int): + raise UserError("instance must be an integer") + self._msg.instance = value + return + super().__setattr__(name, value) + + def __getattr__(self, name): + if name in ("egress_port", "port"): + return self._msg.egress_port + if name == "instance": + return self._msg.instance + return super().__getattr__(name) + + def __str__(self): + return str(self._msg) + + +class MulticastGroupEntry(_EntityBase): + """ + P4 multicast group entry. + """ + + def __init__(self, group_id=0): + super().__init__( + P4RuntimeEntity.packet_replication_engine_entry, + p4runtime_pb2.PacketReplicationEngineEntry) + self.group_id = group_id + self.replicas = [] + self.__doc__ = """ +Multicast group entry. +Create an instance with multicast_group_entry(<group_id>). +Add replicas with <self>.add(<eg_port_1>, <instance_1>).add(<eg_port_2>, <instance_2>)... +""" + self._init = True + + def __dir__(self): + return ["group_id", "replicas"] + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name == "group_id": + if not isinstance(value, int): + raise UserError("group_id must be an integer") + if name == "replicas": + if not isinstance(value, list): + raise UserError("replicas must be a list of Replica objects") + for r in value: + if not isinstance(r, Replica): + raise UserError( + "replicas must be a list of Replica objects") + super().__setattr__(name, value) + + def _from_msg(self, msg): + self.group_id = msg.multicast_group_entry.multicast_group_id + for r in msg.multicast_group_entry.replicas: + self.add(r.egress_port, r.instance) + + def read(self, function=None): + """Generate a P4Runtime Read RPC. 
Supports wildcard reads (just leave + the group_id as 0). + If function is None, returns an iterator. Iterate over it to get all the + multicast group entries (MulticastGroupEntry instances) returned by the + server. Otherwise, function is applied to all the multicast group entries + returned by the server. + + For example: + for c in <self>.read(): + print(c) + The above code is equivalent to the following one-liner: + <self>.read(lambda c: print(c)) + """ + return super().read(function) + + def _update_msg(self): + entry = p4runtime_pb2.PacketReplicationEngineEntry() + mcg_entry = entry.multicast_group_entry + mcg_entry.multicast_group_id = self.group_id + for replica in self.replicas: + r = mcg_entry.replicas.add() + r.CopyFrom(replica._msg) + self._entry = entry + + def add(self, egress_port=None, instance=0): + """Add a replica to the multicast group.""" + self.replicas.append(Replica(egress_port, instance)) + return self + + def _write(self, type_): + if self.group_id == 0: + raise UserError("0 is not a valid group_id for MulticastGroupEntry") + super()._write(type_) + + +class CloneSessionEntry(_EntityBase): + """ + P4 clone session entry. + """ + + def __init__(self, session_id=0): + super().__init__( + P4RuntimeEntity.packet_replication_engine_entry, + p4runtime_pb2.PacketReplicationEngineEntry) + self.session_id = session_id + self.replicas = [] + self.cos = 0 + self.packet_length_bytes = 0 + self.__doc__ = """ +Clone session entry. +Create an instance with clone_session_entry(<session_id>). +Add replicas with <self>.add(<eg_port_1>, <instance_1>).add(<eg_port_2>, +<instance_2>)... +Access class of service with <self>.cos. +Access truncation length with <self>.packet_length_bytes. +""" + self._init = True + + def __dir__(self): + return ["session_id", "replicas", "cos", "packet_length_bytes"] + + def __setattr__(self, name, value): + if name[0] == "_": + super().__setattr__(name, value) + return + if name == "session_id": + if not isinstance(value, int): + raise UserError("session_id must be an integer") + if name == "replicas": + if not isinstance(value, list): + raise UserError("replicas must be a list of Replica objects") + for r in value: + if not isinstance(r, Replica): + raise UserError( + "replicas must be a list of Replica objects") + if name == "cos": + if not isinstance(value, int): + raise UserError("cos must be an integer") + if name == "packet_length_bytes": + if not isinstance(value, int): + raise UserError("packet_length_bytes must be an integer") + super().__setattr__(name, value) + + def _from_msg(self, msg): + self.session_id = msg.clone_session_entry.session_id + for r in msg.clone_session_entry.replicas: + self.add(r.egress_port, r.instance) + self.cos = msg.clone_session_entry.class_of_service + self.packet_length_bytes = msg.clone_session_entry.packet_length_bytes + + def read(self, function=None): + """Generate a P4Runtime Read RPC. Supports wildcard reads (just leave + the session_id as 0). + If function is None, returns an iterator. Iterate over it to get all the + clone session entries (CloneSessionEntry instances) returned by the + server. Otherwise, function is applied to all the clone session entries + returned by the server. 
+ + For example: + for c in <self>.read(): + print(c) + The above code is equivalent to the following one-liner: + <self>.read(lambda c: print(c)) + """ + return super().read(function) + + def _update_msg(self): + entry = p4runtime_pb2.PacketReplicationEngineEntry() + cs_entry = entry.clone_session_entry + cs_entry.session_id = self.session_id + for replica in self.replicas: + r = cs_entry.replicas.add() + r.CopyFrom(replica._msg) + cs_entry.class_of_service = self.cos + cs_entry.packet_length_bytes = self.packet_length_bytes + self._entry = entry + + def add(self, egress_port=None, instance=0): + """Add a replica to the clone session.""" + self.replicas.append(Replica(egress_port, instance)) + return self + + def _write(self, type_): + if self.session_id == 0: + raise UserError("0 is not a valid group_id for CloneSessionEntry") + super()._write(type_) + + +class PacketMetadata: + """ + P4 packet metadata. + """ + + def __init__(self, metadata_info_list): + self._md_info = OrderedDict() + self._md = OrderedDict() + # Initialize every metadata to zero value + for md in metadata_info_list: + self._md_info[md.name] = md + self._md[md.name] = self._parse_md('0', md) + self._set_docstring() + + def _set_docstring(self): + self.__doc__ = "Available metadata:\n\n" + for _, info in self._md_info.items(): + self.__doc__ += str(info) + self.__doc__ += """ +Set a metadata value with <self>.['<metadata_name>'] = '...' + +You may also use <self>.set(<md_name>='<value>') +""" + + def __dir__(self): + return ["clear"] + + def _get_md_info(self, name): + if name in self._md_info: + return self._md_info[name] + raise UserError(f"'{name}' is not a valid metadata name") + + def __getitem__(self, name): + _ = self._get_md_info(name) + print(self._md.get(name, "Unset")) + + def _parse_md(self, value, md_info): + if not isinstance(value, str): + raise UserError("Metadata value must be a string") + md = p4runtime_pb2.PacketMetadata() + md.metadata_id = md_info.id + md.value = encode(value.strip(), md_info.bitwidth) + return md + + def __setitem__(self, name, value): + md_info = self._get_md_info(name) + self._md[name] = self._parse_md(value, md_info) + + def _ipython_key_completions_(self): + return self._md_info.keys() + + def set(self, **kwargs): + """ + Set packet metadata parameters. + + :param kwargs: packet metadata parameter map + :return: void + """ + for name, value in kwargs.items(): + self[name] = value + + def clear(self): + """ + Clear packet metadata. + + :return: void + """ + self._md.clear() + + def values(self): + """ + Get packet metadata values. + + :return: list of packet metadata values + """ + return self._md.values() + + +class PacketIn(): + """ + P4 packet in. + """ + + def __init__(self): + ctrl_pkt_md = P4Objects(P4Type.controller_packet_metadata) + self.md_info_list = {} + if "packet_in" in ctrl_pkt_md: + self.p4_info = ctrl_pkt_md["packet_in"] + for md_info in self.p4_info.metadata: + self.md_info_list[md_info.name] = md_info + self.packet_in_queue = queue.Queue() + + def _packet_in_recv_func(packet_in_queue): + while True: + msg = CLIENT.get_stream_packet("packet", timeout=None) + if not msg: + break + packet_in_queue.put(msg) + + self.recv_t = Thread(target=_packet_in_recv_func, + args=(self.packet_in_queue,)) + self.recv_t.start() + + def sniff(self, function=None, timeout=None): + """ + Return an iterator of packet-in messages. + If the function is provided, we do not return an iterator; + instead we apply the function to every packet-in message. 
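+
+        For example (a sketch, assuming pkt_in = PacketIn()):
+        msgs = pkt_in.sniff(timeout=1)          # iterator of packet-in messages
+        pkt_in.sniff(lambda msg: print(msg), timeout=1)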
+ + :param function: packet-in function + :param timeout: timeout in seconds + :return: list of packet-in messages + """ + msgs = [] + + if timeout is not None and timeout < 0: + raise ValueError("Timeout can't be a negative number.") + + if timeout is None: + while True: + try: + msgs.append(self.packet_in_queue.get(block=True)) + except KeyboardInterrupt: + # User sends a Ctrl+C -> breaking + break + + else: # timeout parameter is provided + deadline = time.time() + timeout + remaining_time = timeout + while remaining_time > 0: + try: + msgs.append(self.packet_in_queue.get(block=True, + timeout=remaining_time)) + remaining_time = deadline - time.time() + except KeyboardInterrupt: + # User sends an interrupt(e.g., Ctrl+C). + break + except queue.Empty: + # No item available on timeout. Exiting + break + + if function is None: + return iter(msgs) + for msg in msgs: + function(msg) + + def str(self): + """ + Packet-in metadata to string. + + :return: void + """ + for name, info in self.md_info_list.itmes(): + print(f"Packet-in metadata attribute '{name}':'{info}'") + + +class PacketOut: + """ + P4 packet out. + """ + + def __init__(self, payload=b'', **kwargs): + + self.p4_info = P4Objects(P4Type.controller_packet_metadata)[ + "packet_out"] + self._entry = None + self.payload = payload + self.metadata = PacketMetadata(self.p4_info.metadata) + if kwargs: + for key, value in kwargs.items(): + self.metadata[key] = value + + def _update_msg(self): + self._entry = p4runtime_pb2.PacketOut() + self._entry.payload = self.payload + self._entry.metadata.extend(self.metadata.values()) + + def __setattr__(self, name, value): + if name == "payload" and not isinstance(value, bytes): + raise UserError("payload must be a bytes type") + if name == "metadata" and not isinstance(value, PacketMetadata): + raise UserError("metadata must be a PacketMetadata type") + return super().__setattr__(name, value) + + def __dir__(self): + return ["metadata", "send", "payload"] + + def __str__(self): + self._update_msg() + return str(self._entry) + + def send(self): + """ + Send a packet-out message. + + :return: void + """ + self._update_msg() + msg = p4runtime_pb2.StreamMessageRequest() + msg.packet.CopyFrom(self._entry) + CLIENT.stream_out_q.put(msg) + + def str(self): + """ + Packet-out metadata to string. + + :return: void + """ + for key, value in self.metadata.itmes(): + print(f"Packet-out metadata attribute '{key}':'{value}'") + + +class IdleTimeoutNotification(): + """ + P4 idle timeout notification. + """ + + def __init__(self): + self.notification_queue = queue.Queue() + + def _notification_recv_func(notification_queue): + while True: + msg = CLIENT.get_stream_packet("idle_timeout_notification", + timeout=None) + if not msg: + break + notification_queue.put(msg) + + self.recv_t = Thread(target=_notification_recv_func, + args=(self.notification_queue,)) + self.recv_t.start() + + def sniff(self, function=None, timeout=None): + """ + Return an iterator of notification messages. + If the function is provided, we do not return an iterator and instead we apply + the function to every notification message. 
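To show how `PacketOut` and `PacketMetadata` fit together, here is a short sketch. It assumes an active P4Runtime session (the module-level `CLIENT`) and a pipeline whose P4Info declares the `packet_out` controller header; the bundled test P4Info in this change set exposes an `egress_port` metadata field, which is what the example sets.

```python
# Illustrative only: metadata values are strings, the payload must be bytes.
pkt = PacketOut(payload=b'\x00' * 64, egress_port='1')
pkt.metadata['egress_port'] = '2'  # metadata can also be changed after construction
print(pkt)                         # shows the assembled p4runtime_pb2.PacketOut
pkt.send()                         # enqueues a StreamMessageRequest to the switch
```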
+ """ + msgs = [] + + if timeout is not None and timeout < 0: + raise ValueError("Timeout can't be a negative number.") + + if timeout is None: + while True: + try: + msgs.append(self.notification_queue.get(block=True)) + except KeyboardInterrupt: + # User sends a Ctrl+C -> breaking + break + + else: # timeout parameter is provided + deadline = time.time() + timeout + remaining_time = timeout + while remaining_time > 0: + try: + msgs.append(self.notification_queue.get(block=True, + timeout=remaining_time)) + remaining_time = deadline - time.time() + except KeyboardInterrupt: + # User sends an interrupt(e.g., Ctrl+C). + break + except queue.Empty: + # No item available on timeout. Exiting + break + + if function is None: + return iter(msgs) + for msg in msgs: + function(msg) diff --git a/src/device/service/drivers/p4/p4_util.py b/src/device/service/drivers/p4/p4_util.py deleted file mode 100644 index b3d54499f56772768dc19bc1cae3bbf9a25e7dc2..0000000000000000000000000000000000000000 --- a/src/device/service/drivers/p4/p4_util.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -P4 driver utilities. -""" - -import logging -import queue -import sys -import threading -from functools import wraps -import grpc -import google.protobuf.text_format -from google.rpc import code_pb2 - -from p4.v1 import p4runtime_pb2 -from p4.v1 import p4runtime_pb2_grpc - -P4_ATTR_DEV_ID = 'id' -P4_ATTR_DEV_NAME = 'name' -P4_ATTR_DEV_VENDOR = 'vendor' -P4_ATTR_DEV_HW_VER = 'hw_ver' -P4_ATTR_DEV_SW_VER = 'sw_ver' -P4_ATTR_DEV_PIPECONF = 'pipeconf' - -P4_VAL_DEF_VENDOR = 'Unknown' -P4_VAL_DEF_HW_VER = 'BMv2 simple_switch' -P4_VAL_DEF_SW_VER = 'Stratum' -P4_VAL_DEF_PIPECONF = 'org.onosproject.pipelines.fabric' - -STREAM_ATTR_ARBITRATION = 'arbitration' -STREAM_ATTR_PACKET = 'packet' -STREAM_ATTR_DIGEST = 'digest' -STREAM_ATTR_UNKNOWN = 'unknown' - -LOGGER = logging.getLogger(__name__) - - -class P4RuntimeException(Exception): - """ - P4Runtime exception handler. - - Attributes - ---------- - grpc_error : object - gRPC error - """ - - def __init__(self, grpc_error): - super().__init__() - self.grpc_error = grpc_error - - def __str__(self): - return str('P4Runtime RPC error (%s): %s', - self.grpc_error.code().name(), self.grpc_error.details()) - - -def parse_p4runtime_error(fun): - """ - Parse P4Runtime error. - - :param fun: function - :return: parsed error - """ - @wraps(fun) - def handle(*args, **kwargs): - try: - return fun(*args, **kwargs) - except grpc.RpcError as rpc_ex: - raise P4RuntimeException(rpc_ex) from None - except Exception as ex: - raise Exception(ex) from None - return handle - - -class P4RuntimeClient: - """ - P4Runtime client. 
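The two stream listeners above share the same `sniff()` contract, so a single hedged sketch covers both. It assumes the receiver threads are running against a live session; without one, the queues simply stay empty until the timeout expires.

```python
# Illustrative sketch: the same pattern applies to PacketIn and
# IdleTimeoutNotification, since both expose sniff(function=None, timeout=None).
packet_in = PacketIn()

# Iterator form: collect packet-in messages for up to 5 seconds.
for msg in packet_in.sniff(timeout=5):
    print(msg)

# Callback form: apply a handler to every message instead of iterating.
notifications = IdleTimeoutNotification()
notifications.sniff(function=lambda n: print(n), timeout=5)
```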
- - Attributes - ---------- - device_id : int - P4 device ID - grpc_address : str - IP address and port - election_id : tuple - Mastership election ID - role_name : str - Role name (optional) - """ - def __init__(self, device_id, grpc_address, election_id, role_name=None): - self.device_id = device_id - self.election_id = election_id - self.role_name = role_name - self.stream_in_q = None - self.stream_out_q = None - self.stream = None - self.stream_recv_thread = None - LOGGER.debug( - 'Connecting to device %d at %s', device_id, grpc_address) - self.channel = grpc.insecure_channel(grpc_address) - self.stub = p4runtime_pb2_grpc.P4RuntimeStub(self.channel) - try: - self.set_up_stream() - except P4RuntimeException: - LOGGER.critical('Failed to connect to P4Runtime server') - sys.exit(1) - - def set_up_stream(self): - """ - Set up a gRPC stream. - """ - self.stream_out_q = queue.Queue() - # queues for different messages - self.stream_in_q = { - STREAM_ATTR_ARBITRATION: queue.Queue(), - STREAM_ATTR_PACKET: queue.Queue(), - STREAM_ATTR_DIGEST: queue.Queue(), - STREAM_ATTR_UNKNOWN: queue.Queue(), - } - - def stream_req_iterator(): - while True: - st_p = self.stream_out_q.get() - if st_p is None: - break - yield st_p - - def stream_recv_wrapper(stream): - @parse_p4runtime_error - def stream_recv(): - for st_p in stream: - if st_p.HasField(STREAM_ATTR_ARBITRATION): - self.stream_in_q[STREAM_ATTR_ARBITRATION].put(st_p) - elif st_p.HasField(STREAM_ATTR_PACKET): - self.stream_in_q[STREAM_ATTR_PACKET].put(st_p) - elif st_p.HasField(STREAM_ATTR_DIGEST): - self.stream_in_q[STREAM_ATTR_DIGEST].put(st_p) - else: - self.stream_in_q[STREAM_ATTR_UNKNOWN].put(st_p) - try: - stream_recv() - except P4RuntimeException as ex: - LOGGER.critical('StreamChannel error, closing stream') - LOGGER.critical(ex) - for k in self.stream_in_q: - self.stream_in_q[k].put(None) - self.stream = self.stub.StreamChannel(stream_req_iterator()) - self.stream_recv_thread = threading.Thread( - target=stream_recv_wrapper, args=(self.stream,)) - self.stream_recv_thread.start() - self.handshake() - - def handshake(self): - """ - Handshake with gRPC server. - """ - - req = p4runtime_pb2.StreamMessageRequest() - arbitration = req.arbitration - arbitration.device_id = self.device_id - election_id = arbitration.election_id - election_id.high = self.election_id[0] - election_id.low = self.election_id[1] - if self.role_name is not None: - arbitration.role.name = self.role_name - self.stream_out_q.put(req) - - rep = self.get_stream_packet(STREAM_ATTR_ARBITRATION, timeout=2) - if rep is None: - LOGGER.critical('Failed to establish session with server') - sys.exit(1) - is_primary = (rep.arbitration.status.code == code_pb2.OK) - LOGGER.debug('Session established, client is %s', - 'primary' if is_primary else 'backup') - if not is_primary: - LOGGER.warning( - 'You are not the primary client, ' - 'you only have read access to the server') - - def get_stream_packet(self, type_, timeout=1): - """ - Get a new message from the stream. - - :param type_: stream type. - :param timeout: time to wait. - :return: message or None - """ - if type_ not in self.stream_in_q: - LOGGER.critical('Unknown stream type %s', type_) - return None - try: - msg = self.stream_in_q[type_].get(timeout=timeout) - return msg - except queue.Empty: # timeout expired - return None - - @parse_p4runtime_error - def get_p4info(self): - """ - Retrieve P4Info content. - - :return: P4Info object. 
- """ - - LOGGER.debug('Retrieving P4Info file') - req = p4runtime_pb2.GetForwardingPipelineConfigRequest() - req.device_id = self.device_id - req.response_type =\ - p4runtime_pb2.GetForwardingPipelineConfigRequest.P4INFO_AND_COOKIE - rep = self.stub.GetForwardingPipelineConfig(req) - return rep.config.p4info - - @parse_p4runtime_error - def set_fwd_pipe_config(self, p4info_path, bin_path): - """ - Configure the pipeline. - - :param p4info_path: path to the P4Info file - :param bin_path: path to the binary file - :return: - """ - - LOGGER.debug('Setting forwarding pipeline config') - req = p4runtime_pb2.SetForwardingPipelineConfigRequest() - req.device_id = self.device_id - if self.role_name is not None: - req.role = self.role_name - election_id = req.election_id - election_id.high = self.election_id[0] - election_id.low = self.election_id[1] - req.action =\ - p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT - with open(p4info_path, 'r', encoding='utf8') as f_1: - with open(bin_path, 'rb', encoding='utf8') as f_2: - try: - google.protobuf.text_format.Merge( - f_1.read(), req.config.p4info) - except google.protobuf.text_format.ParseError: - LOGGER.error('Error when parsing P4Info') - raise - req.config.p4_device_config = f_2.read() - return self.stub.SetForwardingPipelineConfig(req) - - def tear_down(self): - """ - Tear connection with the gRPC server down. - """ - - if self.stream_out_q: - LOGGER.debug('Cleaning up stream') - self.stream_out_q.put(None) - if self.stream_in_q: - for k in self.stream_in_q: - self.stream_in_q[k].put(None) - if self.stream_recv_thread: - self.stream_recv_thread.join() - self.channel.close() - del self.channel diff --git a/src/device/tests/Device_OpenConfig_Template.py b/src/device/tests/Device_OpenConfig_Template.py index 6afa2721ff920c39de243b308b9b9a4749cb013b..af339cce40b60f8ea0e310613c951968f4fc9aeb 100644 --- a/src/device/tests/Device_OpenConfig_Template.py +++ b/src/device/tests/Device_OpenConfig_Template.py @@ -32,9 +32,11 @@ DEVICE_OC_CONNECT_RULES = json_device_connect_rules(DEVICE_OC_ADDRESS, DEVICE_OC 'hostkey_verify' : True, 'look_for_keys' : True, 'allow_agent' : True, + 'delete_rule' : False, 'device_params' : {'name': 'default'}, 'manager_params' : {'timeout' : DEVICE_OC_TIMEOUT}, }) + DEVICE_OC_CONFIG_RULES = [] # populate your configuration rules to test DEVICE_OC_DECONFIG_RULES = [] # populate your deconfiguration rules to test diff --git a/src/device/tests/device_p4.py b/src/device/tests/device_p4.py index 4cd0a4c745d3a07b71f320ce79d73c95ffb0af37..ccc62c2195c8dae41a8e98b128b08965954d57f0 100644 --- a/src/device/tests/device_p4.py +++ b/src/device/tests/device_p4.py @@ -16,18 +16,23 @@ P4 device example configuration. 
""" -from common.tools.object_factory.ConfigRule import json_config_rule_set +import os +from common.tools.object_factory.ConfigRule import ( + json_config_rule_set, json_config_rule_delete) from common.tools.object_factory.Device import ( json_device_connect_rules, json_device_id, json_device_p4_disabled) -DEVICE_P4_DPID = 0 +CUR_PATH = os.path.dirname(os.path.abspath(__file__)) + +DEVICE_P4_DPID = 1 DEVICE_P4_NAME = 'device:leaf1' -DEVICE_P4_ADDRESS = '127.0.0.1' +DEVICE_P4_IP_ADDR = '127.0.0.1' DEVICE_P4_PORT = '50101' DEVICE_P4_VENDOR = 'Open Networking Foundation' DEVICE_P4_HW_VER = 'BMv2 simple_switch' DEVICE_P4_SW_VER = 'Stratum' -DEVICE_P4_PIPECONF = 'org.onosproject.pipelines.fabric' +DEVICE_P4_BIN_PATH = os.path.join(CUR_PATH, 'p4/test-bmv2.json') +DEVICE_P4_INFO_PATH = os.path.join(CUR_PATH, 'p4/test-p4info.txt') DEVICE_P4_WORKERS = 2 DEVICE_P4_GRACE_PERIOD = 60 DEVICE_P4_TIMEOUT = 60 @@ -37,16 +42,52 @@ DEVICE_P4_ID = json_device_id(DEVICE_P4_UUID) DEVICE_P4 = json_device_p4_disabled(DEVICE_P4_UUID) DEVICE_P4_CONNECT_RULES = json_device_connect_rules( - DEVICE_P4_ADDRESS, DEVICE_P4_PORT, { + DEVICE_P4_IP_ADDR, + DEVICE_P4_PORT, + { 'id': DEVICE_P4_DPID, 'name': DEVICE_P4_NAME, - 'hw-ver': DEVICE_P4_HW_VER, - 'sw-ver': DEVICE_P4_SW_VER, - 'pipeconf': DEVICE_P4_PIPECONF, - 'timeout': DEVICE_P4_TIMEOUT + 'vendor': DEVICE_P4_VENDOR, + 'hw_ver': DEVICE_P4_HW_VER, + 'sw_ver': DEVICE_P4_SW_VER, + 'timeout': DEVICE_P4_TIMEOUT, + 'p4bin': DEVICE_P4_BIN_PATH, + 'p4info': DEVICE_P4_INFO_PATH } ) -DEVICE_P4_CONFIG_RULES = [ - json_config_rule_set('key1', 'value1'), +DEVICE_P4_CONFIG_TABLE_ENTRY = [ + json_config_rule_set( + 'table', + { + 'table-name': 'IngressPipeImpl.acl_table', + 'match-fields': [ + { + 'match-field': 'hdr.ethernet.dst_addr', + 'match-value': 'aa:bb:cc:dd:ee:22 &&& ff:ff:ff:ff:ff:ff' + } + ], + 'action-name': 'IngressPipeImpl.clone_to_cpu', + 'action-params': [], + 'priority': 1 + } + ) +] + +DEVICE_P4_DECONFIG_TABLE_ENTRY = [ + json_config_rule_delete( + 'table', + { + 'table-name': 'IngressPipeImpl.acl_table', + 'match-fields': [ + { + 'match-field': 'hdr.ethernet.dst_addr', + 'match-value': 'aa:bb:cc:dd:ee:22 &&& ff:ff:ff:ff:ff:ff' + } + ], + 'action-name': 'IngressPipeImpl.clone_to_cpu', + 'action-params': [], + 'priority': 1 + } + ) ] diff --git a/src/device/tests/mock_p4runtime_service.py b/src/device/tests/mock_p4runtime_service.py index 77da0113676dc6f820d995b34915df6d0ba30f01..c1b2dcb45a18caf0c839f9bd8a68484bba5efbea 100644 --- a/src/device/tests/mock_p4runtime_service.py +++ b/src/device/tests/mock_p4runtime_service.py @@ -22,7 +22,7 @@ import grpc from p4.v1 import p4runtime_pb2_grpc from .device_p4 import( - DEVICE_P4_ADDRESS, DEVICE_P4_PORT, + DEVICE_P4_IP_ADDR, DEVICE_P4_PORT, DEVICE_P4_WORKERS, DEVICE_P4_GRACE_PERIOD) from .mock_p4runtime_servicer_impl import MockP4RuntimeServicerImpl @@ -35,7 +35,7 @@ class MockP4RuntimeService: """ def __init__( - self, address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT, + self, address=DEVICE_P4_IP_ADDR, port=DEVICE_P4_PORT, max_workers=DEVICE_P4_WORKERS, grace_period=DEVICE_P4_GRACE_PERIOD): self.address = address diff --git a/src/device/tests/mock_p4runtime_servicer_impl.py b/src/device/tests/mock_p4runtime_servicer_impl.py index d29445da43afb58ef062f62c496b0780f92a4648..8a516303d9310be55662ef749175655c4069ae5c 100644 --- a/src/device/tests/mock_p4runtime_servicer_impl.py +++ b/src/device/tests/mock_p4runtime_servicer_impl.py @@ -22,11 +22,12 @@ from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc from p4.config.v1 import 
p4info_pb2 try: - from p4_util import STREAM_ATTR_ARBITRATION, STREAM_ATTR_PACKET + from p4_client import STREAM_ATTR_ARBITRATION, STREAM_ATTR_PACKET except ImportError: - from device.service.drivers.p4.p4_util import STREAM_ATTR_ARBITRATION,\ + from device.service.drivers.p4.p4_client import STREAM_ATTR_ARBITRATION,\ STREAM_ATTR_PACKET + class MockP4RuntimeServicerImpl(p4runtime_pb2_grpc.P4RuntimeServicer): """ A P4Runtime service implementation for testing purposes. diff --git a/src/device/tests/p4/test-bmv2.json b/src/device/tests/p4/test-bmv2.json new file mode 100644 index 0000000000000000000000000000000000000000..f6ef6af34907ae00bcfa1034bb317a8f270b8995 --- /dev/null +++ b/src/device/tests/p4/test-bmv2.json @@ -0,0 +1,1910 @@ +{ + "header_types" : [ + { + "name" : "scalars_0", + "id" : 0, + "fields" : [ + ["tmp_0", 1, false], + ["tmp_1", 1, false], + ["tmp", 1, false], + ["local_metadata_t.l4_src_port", 16, false], + ["local_metadata_t.l4_dst_port", 16, false], + ["local_metadata_t.is_multicast", 1, false], + ["local_metadata_t.next_srv6_sid", 128, false], + ["local_metadata_t.ip_proto", 8, false], + ["local_metadata_t.icmp_type", 8, false], + ["_padding_0", 4, false] + ] + }, + { + "name" : "standard_metadata", + "id" : 1, + "fields" : [ + ["ingress_port", 9, false], + ["egress_spec", 9, false], + ["egress_port", 9, false], + ["clone_spec", 32, false], + ["instance_type", 32, false], + ["drop", 1, false], + ["recirculate_port", 16, false], + ["packet_length", 32, false], + ["enq_timestamp", 32, false], + ["enq_qdepth", 19, false], + ["deq_timedelta", 32, false], + ["deq_qdepth", 19, false], + ["ingress_global_timestamp", 48, false], + ["egress_global_timestamp", 48, false], + ["lf_field_list", 32, false], + ["mcast_grp", 16, false], + ["resubmit_flag", 32, false], + ["egress_rid", 16, false], + ["recirculate_flag", 32, false], + ["checksum_error", 1, false], + ["parser_error", 32, false], + ["priority", 3, false], + ["_padding", 2, false] + ] + }, + { + "name" : "cpu_out_header_t", + "id" : 2, + "fields" : [ + ["egress_port", 9, false], + ["_pad", 7, false] + ] + }, + { + "name" : "cpu_in_header_t", + "id" : 3, + "fields" : [ + ["ingress_port", 9, false], + ["_pad", 7, false] + ] + }, + { + "name" : "ethernet_t", + "id" : 4, + "fields" : [ + ["dst_addr", 48, false], + ["src_addr", 48, false], + ["ether_type", 16, false] + ] + }, + { + "name" : "ipv4_t", + "id" : 5, + "fields" : [ + ["version", 4, false], + ["ihl", 4, false], + ["dscp", 6, false], + ["ecn", 2, false], + ["total_len", 16, false], + ["identification", 16, false], + ["flags", 3, false], + ["frag_offset", 13, false], + ["ttl", 8, false], + ["protocol", 8, false], + ["hdr_checksum", 16, false], + ["src_addr", 32, false], + ["dst_addr", 32, false] + ] + }, + { + "name" : "ipv6_t", + "id" : 6, + "fields" : [ + ["version", 4, false], + ["traffic_class", 8, false], + ["flow_label", 20, false], + ["payload_len", 16, false], + ["next_hdr", 8, false], + ["hop_limit", 8, false], + ["src_addr", 128, false], + ["dst_addr", 128, false] + ] + }, + { + "name" : "srv6h_t", + "id" : 7, + "fields" : [ + ["next_hdr", 8, false], + ["hdr_ext_len", 8, false], + ["routing_type", 8, false], + ["segment_left", 8, false], + ["last_entry", 8, false], + ["flags", 8, false], + ["tag", 16, false] + ] + }, + { + "name" : "tcp_t", + "id" : 8, + "fields" : [ + ["src_port", 16, false], + ["dst_port", 16, false], + ["seq_no", 32, false], + ["ack_no", 32, false], + ["data_offset", 4, false], + ["res", 3, false], + ["ecn", 3, false], + ["ctrl", 6, false], 
+ ["window", 16, false], + ["checksum", 16, false], + ["urgent_ptr", 16, false] + ] + }, + { + "name" : "udp_t", + "id" : 9, + "fields" : [ + ["src_port", 16, false], + ["dst_port", 16, false], + ["len", 16, false], + ["checksum", 16, false] + ] + }, + { + "name" : "icmp_t", + "id" : 10, + "fields" : [ + ["type", 8, false], + ["icmp_code", 8, false], + ["checksum", 16, false], + ["identifier", 16, false], + ["sequence_number", 16, false], + ["timestamp", 64, false] + ] + }, + { + "name" : "icmpv6_t", + "id" : 11, + "fields" : [ + ["type", 8, false], + ["code", 8, false], + ["checksum", 16, false] + ] + }, + { + "name" : "ndp_t", + "id" : 12, + "fields" : [ + ["flags", 32, false], + ["target_ipv6_addr", 128, false], + ["type", 8, false], + ["length", 8, false], + ["target_mac_addr", 48, false] + ] + }, + { + "name" : "srv6_list_t", + "id" : 13, + "fields" : [ + ["segment_id", 128, false] + ] + } + ], + "headers" : [ + { + "name" : "scalars", + "id" : 0, + "header_type" : "scalars_0", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "standard_metadata", + "id" : 1, + "header_type" : "standard_metadata", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "cpu_out", + "id" : 2, + "header_type" : "cpu_out_header_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "cpu_in", + "id" : 3, + "header_type" : "cpu_in_header_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "ethernet", + "id" : 4, + "header_type" : "ethernet_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "ipv4", + "id" : 5, + "header_type" : "ipv4_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "ipv6", + "id" : 6, + "header_type" : "ipv6_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "srv6h", + "id" : 7, + "header_type" : "srv6h_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "tcp", + "id" : 8, + "header_type" : "tcp_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "udp", + "id" : 9, + "header_type" : "udp_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "icmp", + "id" : 10, + "header_type" : "icmp_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "icmpv6", + "id" : 11, + "header_type" : "icmpv6_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "ndp", + "id" : 12, + "header_type" : "ndp_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "srv6_list[0]", + "id" : 13, + "header_type" : "srv6_list_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "srv6_list[1]", + "id" : 14, + "header_type" : "srv6_list_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "srv6_list[2]", + "id" : 15, + "header_type" : "srv6_list_t", + "metadata" : false, + "pi_omit" : true + }, + { + "name" : "srv6_list[3]", + "id" : 16, + "header_type" : "srv6_list_t", + "metadata" : false, + "pi_omit" : true + } + ], + "header_stacks" : [ + { + "name" : "srv6_list", + "id" : 0, + "header_type" : "srv6_list_t", + "size" : 4, + "header_ids" : [13, 14, 15, 16] + } + ], + "header_union_types" : [], + "header_unions" : [], + "header_union_stacks" : [], + "field_lists" : [ + { + "id" : 1, + "name" : "fl", + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 474, + "column" : 34, + "source_fragment" : "{ standard_metadata.ingress_port }" + }, + "elements" : [ + { + "type" : "field", + "value" : ["standard_metadata", "ingress_port"] + } + ] + } + ], + "errors" : [ + ["NoError", 1], + ["PacketTooShort", 2], + ["NoMatch", 3], + ["StackOutOfBounds", 4], + 
["HeaderTooShort", 5], + ["ParserTimeout", 6], + ["ParserInvalidArgument", 7] + ], + "enums" : [], + "parsers" : [ + { + "name" : "parser", + "id" : 0, + "init_state" : "start", + "parse_states" : [ + { + "name" : "start", + "id" : 0, + "parser_ops" : [], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x00ff", + "mask" : null, + "next_state" : "parse_packet_out" + }, + { + "value" : "default", + "mask" : null, + "next_state" : "parse_ethernet" + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["standard_metadata", "ingress_port"] + } + ] + }, + { + "name" : "parse_packet_out", + "id" : 1, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "cpu_out" + } + ], + "op" : "extract" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : "parse_ethernet" + } + ], + "transition_key" : [] + }, + { + "name" : "parse_ethernet", + "id" : 2, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "ethernet" + } + ], + "op" : "extract" + } + ], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x0800", + "mask" : null, + "next_state" : "parse_ipv4" + }, + { + "type" : "hexstr", + "value" : "0x86dd", + "mask" : null, + "next_state" : "parse_ipv6" + }, + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["ethernet", "ether_type"] + } + ] + }, + { + "name" : "parse_ipv4", + "id" : 3, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "ipv4" + } + ], + "op" : "extract" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.ip_proto"] + }, + { + "type" : "field", + "value" : ["ipv4", "protocol"] + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x06", + "mask" : null, + "next_state" : "parse_tcp" + }, + { + "type" : "hexstr", + "value" : "0x11", + "mask" : null, + "next_state" : "parse_udp" + }, + { + "type" : "hexstr", + "value" : "0x01", + "mask" : null, + "next_state" : "parse_icmp" + }, + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["ipv4", "protocol"] + } + ] + }, + { + "name" : "parse_ipv6", + "id" : 4, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "ipv6" + } + ], + "op" : "extract" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.ip_proto"] + }, + { + "type" : "field", + "value" : ["ipv6", "next_hdr"] + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x06", + "mask" : null, + "next_state" : "parse_tcp" + }, + { + "type" : "hexstr", + "value" : "0x11", + "mask" : null, + "next_state" : "parse_udp" + }, + { + "type" : "hexstr", + "value" : "0x3a", + "mask" : null, + "next_state" : "parse_icmpv6" + }, + { + "type" : "hexstr", + "value" : "0x2b", + "mask" : null, + "next_state" : "parse_srv6" + }, + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["ipv6", "next_hdr"] + } + ] + }, + { + "name" : "parse_tcp", + "id" : 5, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "tcp" + } + ], + "op" : "extract" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.l4_src_port"] + }, + { + "type" : "field", + "value" : ["tcp", "src_port"] + } + ], + 
"op" : "set" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.l4_dst_port"] + }, + { + "type" : "field", + "value" : ["tcp", "dst_port"] + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [] + }, + { + "name" : "parse_udp", + "id" : 6, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "udp" + } + ], + "op" : "extract" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.l4_src_port"] + }, + { + "type" : "field", + "value" : ["udp", "src_port"] + } + ], + "op" : "set" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.l4_dst_port"] + }, + { + "type" : "field", + "value" : ["udp", "dst_port"] + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [] + }, + { + "name" : "parse_icmp", + "id" : 7, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "icmp" + } + ], + "op" : "extract" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.icmp_type"] + }, + { + "type" : "field", + "value" : ["icmp", "type"] + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [] + }, + { + "name" : "parse_icmpv6", + "id" : 8, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "icmpv6" + } + ], + "op" : "extract" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.icmp_type"] + }, + { + "type" : "field", + "value" : ["icmpv6", "type"] + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x87", + "mask" : null, + "next_state" : "parse_ndp" + }, + { + "type" : "hexstr", + "value" : "0x88", + "mask" : null, + "next_state" : "parse_ndp" + }, + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["icmpv6", "type"] + } + ] + }, + { + "name" : "parse_ndp", + "id" : 9, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "ndp" + } + ], + "op" : "extract" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [] + }, + { + "name" : "parse_srv6", + "id" : 10, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "srv6h" + } + ], + "op" : "extract" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : "parse_srv6_list" + } + ], + "transition_key" : [] + }, + { + "name" : "parse_srv6_list", + "id" : 11, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "stack", + "value" : "srv6_list" + } + ], + "op" : "extract" + }, + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "tmp_0"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "?", + "left" : { + "type" : "hexstr", + "value" : "0x01" + }, + "right" : { + "type" : "hexstr", + "value" : "0x00" + }, + "cond" : { + "type" : "expression", + "value" : { + "op" : "==", + "left" : { + "type" : "expression", + "value" : { + "op" : "&", + "left" : { + "type" : "expression", + "value" : { + "op" : "+", + "left" : { + "type" : "expression", + "value" : { + "op" : "&", + 
"left" : { + "type" : "field", + "value" : ["srv6h", "segment_left"] + }, + "right" : { + "type" : "hexstr", + "value" : "0xffffffff" + } + } + }, + "right" : { + "type" : "hexstr", + "value" : "0xffffffff" + } + } + }, + "right" : { + "type" : "hexstr", + "value" : "0xffffffff" + } + } + }, + "right" : { + "type" : "expression", + "value" : { + "op" : "last_stack_index", + "left" : null, + "right" : { + "type" : "header_stack", + "value" : "srv6_list" + } + } + } + } + } + } + } + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x01", + "mask" : null, + "next_state" : "mark_current_srv6" + }, + { + "value" : "default", + "mask" : null, + "next_state" : "check_last_srv6" + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["scalars", "tmp_0"] + } + ] + }, + { + "name" : "mark_current_srv6", + "id" : 12, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.next_srv6_sid"] + }, + { + "type" : "expression", + "value" : { + "type" : "stack_field", + "value" : ["srv6_list", "segment_id"] + } + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : null, + "next_state" : "check_last_srv6" + } + ], + "transition_key" : [] + }, + { + "name" : "check_last_srv6", + "id" : 13, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "tmp_1"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "?", + "left" : { + "type" : "hexstr", + "value" : "0x01" + }, + "right" : { + "type" : "hexstr", + "value" : "0x00" + }, + "cond" : { + "type" : "expression", + "value" : { + "op" : "==", + "left" : { + "type" : "expression", + "value" : { + "op" : "&", + "left" : { + "type" : "field", + "value" : ["srv6h", "last_entry"] + }, + "right" : { + "type" : "hexstr", + "value" : "0xffffffff" + } + } + }, + "right" : { + "type" : "expression", + "value" : { + "op" : "last_stack_index", + "left" : null, + "right" : { + "type" : "header_stack", + "value" : "srv6_list" + } + } + } + } + } + } + } + } + ], + "op" : "set" + } + ], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x01", + "mask" : null, + "next_state" : "parse_srv6_next_hdr" + }, + { + "type" : "hexstr", + "value" : "0x00", + "mask" : null, + "next_state" : "parse_srv6_list" + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["scalars", "tmp_1"] + } + ] + }, + { + "name" : "parse_srv6_next_hdr", + "id" : 14, + "parser_ops" : [], + "transitions" : [ + { + "type" : "hexstr", + "value" : "0x06", + "mask" : null, + "next_state" : "parse_tcp" + }, + { + "type" : "hexstr", + "value" : "0x11", + "mask" : null, + "next_state" : "parse_udp" + }, + { + "type" : "hexstr", + "value" : "0x3a", + "mask" : null, + "next_state" : "parse_icmpv6" + }, + { + "value" : "default", + "mask" : null, + "next_state" : null + } + ], + "transition_key" : [ + { + "type" : "field", + "value" : ["srv6h", "next_hdr"] + } + ] + } + ] + } + ], + "parse_vsets" : [], + "deparsers" : [ + { + "name" : "deparser", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 602, + "column" : 8, + "source_fragment" : "DeparserImpl" + }, + "order" : ["cpu_in", "ethernet", "ipv4", "ipv6", "srv6h", "srv6_list[0]", "srv6_list[1]", "srv6_list[2]", "srv6_list[3]", "tcp", "udp", "icmp", "icmpv6", "ndp"] + } + ], + "meter_arrays" : [], + "counter_arrays" : [ + { + "name" : "l2_exact_table_counter", + "id" : 0, + "source_info" : { + "filename" : 
"p4src/main.p4", + "line" : 399, + "column" : 8, + "source_fragment" : "counters" + }, + "is_direct" : true, + "binding" : "IngressPipeImpl.l2_exact_table" + }, + { + "name" : "l2_ternary_table_counter", + "id" : 1, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 423, + "column" : 8, + "source_fragment" : "counters" + }, + "is_direct" : true, + "binding" : "IngressPipeImpl.l2_ternary_table" + }, + { + "name" : "acl_table_counter", + "id" : 2, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 494, + "column" : 8, + "source_fragment" : "counters" + }, + "is_direct" : true, + "binding" : "IngressPipeImpl.acl_table" + } + ], + "register_arrays" : [], + "calculations" : [ + { + "name" : "calc", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 580, + "column" : 8, + "source_fragment" : "update_checksum(hdr.ndp.isValid(), ..." + }, + "algo" : "csum16", + "input" : [ + { + "type" : "field", + "value" : ["ipv6", "src_addr"] + }, + { + "type" : "field", + "value" : ["ipv6", "dst_addr"] + }, + { + "type" : "field", + "value" : ["ipv6", "payload_len"] + }, + { + "type" : "hexstr", + "value" : "0x00", + "bitwidth" : 8 + }, + { + "type" : "field", + "value" : ["ipv6", "next_hdr"] + }, + { + "type" : "field", + "value" : ["icmpv6", "type"] + }, + { + "type" : "field", + "value" : ["icmpv6", "code"] + }, + { + "type" : "field", + "value" : ["ndp", "flags"] + }, + { + "type" : "field", + "value" : ["ndp", "target_ipv6_addr"] + }, + { + "type" : "field", + "value" : ["ndp", "type"] + }, + { + "type" : "field", + "value" : ["ndp", "length"] + }, + { + "type" : "field", + "value" : ["ndp", "target_mac_addr"] + } + ] + } + ], + "learn_lists" : [], + "actions" : [ + { + "name" : "NoAction", + "id" : 0, + "runtime_data" : [], + "primitives" : [] + }, + { + "name" : "IngressPipeImpl.drop", + "id" : 1, + "runtime_data" : [], + "primitives" : [ + { + "op" : "mark_to_drop", + "parameters" : [ + { + "type" : "header", + "value" : "standard_metadata" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 351, + "column" : 8, + "source_fragment" : "mark_to_drop(standard_metadata)" + } + } + ] + }, + { + "name" : "IngressPipeImpl.drop", + "id" : 2, + "runtime_data" : [], + "primitives" : [ + { + "op" : "mark_to_drop", + "parameters" : [ + { + "type" : "header", + "value" : "standard_metadata" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 351, + "column" : 8, + "source_fragment" : "mark_to_drop(standard_metadata)" + } + } + ] + }, + { + "name" : "IngressPipeImpl.drop", + "id" : 3, + "runtime_data" : [], + "primitives" : [ + { + "op" : "mark_to_drop", + "parameters" : [ + { + "type" : "header", + "value" : "standard_metadata" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 351, + "column" : 8, + "source_fragment" : "mark_to_drop(standard_metadata)" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_egress_port", + "id" : 4, + "runtime_data" : [ + { + "name" : "port_num", + "bitwidth" : 9 + } + ], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "egress_spec"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 383, + "column" : 8, + "source_fragment" : "standard_metadata.egress_spec = port_num" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_multicast_group", + "id" : 5, + "runtime_data" : [ + { + "name" : "gid", + "bitwidth" : 16 + } + ], + "primitives" : [ 
+ { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "mcast_grp"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 409, + "column" : 8, + "source_fragment" : "standard_metadata.mcast_grp = gid" + } + }, + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.is_multicast"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "b2d", + "left" : null, + "right" : { + "type" : "bool", + "value" : true + } + } + } + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 410, + "column" : 8, + "source_fragment" : "local_metadata.is_multicast = true" + } + } + ] + }, + { + "name" : "IngressPipeImpl.send_to_cpu", + "id" : 6, + "runtime_data" : [], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "egress_spec"] + }, + { + "type" : "hexstr", + "value" : "0x00ff" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 466, + "column" : 8, + "source_fragment" : "standard_metadata.egress_spec = 255" + } + } + ] + }, + { + "name" : "IngressPipeImpl.clone_to_cpu", + "id" : 7, + "runtime_data" : [], + "primitives" : [ + { + "op" : "clone_ingress_pkt_to_egress", + "parameters" : [ + { + "type" : "hexstr", + "value" : "0x00000063" + }, + { + "type" : "hexstr", + "value" : "0x1" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 474, + "column" : 8, + "source_fragment" : "clone3(CloneType.I2E, 99, { standard_metadata.ingress_port })" + } + } + ] + }, + { + "name" : "act", + "id" : 8, + "runtime_data" : [], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "tmp"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "b2d", + "left" : null, + "right" : { + "type" : "bool", + "value" : true + } + } + } + } + ] + } + ] + }, + { + "name" : "act_0", + "id" : 9, + "runtime_data" : [], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "tmp"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "b2d", + "left" : null, + "right" : { + "type" : "bool", + "value" : false + } + } + } + } + ] + } + ] + }, + { + "name" : "act_1", + "id" : 10, + "runtime_data" : [], + "primitives" : [ + { + "op" : "mark_to_drop", + "parameters" : [ + { + "type" : "header", + "value" : "standard_metadata" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 567, + "column" : 12, + "source_fragment" : "mark_to_drop(standard_metadata)" + } + } + ] + } + ], + "pipelines" : [ + { + "name" : "ingress", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 345, + "column" : 8, + "source_fragment" : "IngressPipeImpl" + }, + "init_table" : "IngressPipeImpl.l2_exact_table", + "tables" : [ + { + "name" : "IngressPipeImpl.l2_exact_table", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 386, + "column" : 10, + "source_fragment" : "l2_exact_table" + }, + "key" : [ + { + "match_type" : "exact", + "name" : "hdr.ethernet.dst_addr", + "target" : ["ethernet", "dst_addr"], + "mask" : null + } + ], + "match_type" : "exact", + "type" : "simple", + "max_size" : 1024, + "with_counters" : true, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [4, 
1], + "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.drop"], + "base_default_next" : null, + "next_tables" : { + "__HIT__" : "tbl_act", + "__MISS__" : "tbl_act_0" + }, + "default_entry" : { + "action_id" : 1, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + }, + { + "name" : "tbl_act", + "id" : 1, + "key" : [], + "match_type" : "exact", + "type" : "simple", + "max_size" : 1024, + "with_counters" : false, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [8], + "actions" : ["act"], + "base_default_next" : "node_5", + "next_tables" : { + "act" : "node_5" + }, + "default_entry" : { + "action_id" : 8, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + }, + { + "name" : "tbl_act_0", + "id" : 2, + "key" : [], + "match_type" : "exact", + "type" : "simple", + "max_size" : 1024, + "with_counters" : false, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [9], + "actions" : ["act_0"], + "base_default_next" : "node_5", + "next_tables" : { + "act_0" : "node_5" + }, + "default_entry" : { + "action_id" : 9, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + }, + { + "name" : "IngressPipeImpl.l2_ternary_table", + "id" : 3, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 413, + "column" : 10, + "source_fragment" : "l2_ternary_table" + }, + "key" : [ + { + "match_type" : "ternary", + "name" : "hdr.ethernet.dst_addr", + "target" : ["ethernet", "dst_addr"], + "mask" : null + } + ], + "match_type" : "ternary", + "type" : "simple", + "max_size" : 1024, + "with_counters" : true, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [5, 2], + "actions" : ["IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"], + "base_default_next" : "IngressPipeImpl.acl_table", + "next_tables" : { + "IngressPipeImpl.set_multicast_group" : "IngressPipeImpl.acl_table", + "IngressPipeImpl.drop" : "IngressPipeImpl.acl_table" + }, + "default_entry" : { + "action_id" : 2, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + }, + { + "name" : "IngressPipeImpl.acl_table", + "id" : 4, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 477, + "column" : 10, + "source_fragment" : "acl_table" + }, + "key" : [ + { + "match_type" : "ternary", + "name" : "standard_metadata.ingress_port", + "target" : ["standard_metadata", "ingress_port"], + "mask" : null + }, + { + "match_type" : "ternary", + "name" : "hdr.ethernet.dst_addr", + "target" : ["ethernet", "dst_addr"], + "mask" : null + }, + { + "match_type" : "ternary", + "name" : "hdr.ethernet.src_addr", + "target" : ["ethernet", "src_addr"], + "mask" : null + }, + { + "match_type" : "ternary", + "name" : "hdr.ethernet.ether_type", + "target" : ["ethernet", "ether_type"], + "mask" : null + }, + { + "match_type" : "ternary", + "name" : "local_metadata.ip_proto", + "target" : ["scalars", "local_metadata_t.ip_proto"], + "mask" : null + }, + { + "match_type" : "ternary", + "name" : "local_metadata.icmp_type", + "target" : ["scalars", "local_metadata_t.icmp_type"], + "mask" : null + }, + { + "match_type" : "ternary", + "name" : "local_metadata.l4_src_port", + "target" : ["scalars", "local_metadata_t.l4_src_port"], + "mask" : null + }, + { + "match_type" : "ternary", + "name" : "local_metadata.l4_dst_port", + "target" : ["scalars", "local_metadata_t.l4_dst_port"], + "mask" : null + } + ], + "match_type" : "ternary", + "type" : "simple", + "max_size" : 1024, + 
"with_counters" : true, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [6, 7, 3, 0], + "actions" : ["IngressPipeImpl.send_to_cpu", "IngressPipeImpl.clone_to_cpu", "IngressPipeImpl.drop", "NoAction"], + "base_default_next" : null, + "next_tables" : { + "IngressPipeImpl.send_to_cpu" : null, + "IngressPipeImpl.clone_to_cpu" : null, + "IngressPipeImpl.drop" : null, + "NoAction" : null + }, + "default_entry" : { + "action_id" : 0, + "action_const" : false, + "action_data" : [], + "action_entry_const" : false + } + } + ], + "action_profiles" : [], + "conditionals" : [ + { + "name" : "node_5", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 533, + "column" : 16, + "source_fragment" : "!l2_exact_table.apply().hit" + }, + "expression" : { + "type" : "expression", + "value" : { + "op" : "not", + "left" : null, + "right" : { + "type" : "expression", + "value" : { + "op" : "d2b", + "left" : null, + "right" : { + "type" : "field", + "value" : ["scalars", "tmp"] + } + } + } + } + }, + "true_next" : "IngressPipeImpl.l2_ternary_table", + "false_next" : "IngressPipeImpl.acl_table" + } + ] + }, + { + "name" : "egress", + "id" : 1, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 546, + "column" : 8, + "source_fragment" : "EgressPipeImpl" + }, + "init_table" : "node_10", + "tables" : [ + { + "name" : "tbl_act_1", + "id" : 5, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 567, + "column" : 12, + "source_fragment" : "mark_to_drop(standard_metadata)" + }, + "key" : [], + "match_type" : "exact", + "type" : "simple", + "max_size" : 1024, + "with_counters" : false, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [10], + "actions" : ["act_1"], + "base_default_next" : null, + "next_tables" : { + "act_1" : null + }, + "default_entry" : { + "action_id" : 10, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + } + ], + "action_profiles" : [], + "conditionals" : [ + { + "name" : "node_10", + "id" : 1, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 565, + "column" : 12, + "source_fragment" : "local_metadata.is_multicast == true && ..." + }, + "expression" : { + "type" : "expression", + "value" : { + "op" : "and", + "left" : { + "type" : "expression", + "value" : { + "op" : "==", + "left" : { + "type" : "expression", + "value" : { + "op" : "d2b", + "left" : null, + "right" : { + "type" : "field", + "value" : ["scalars", "local_metadata_t.is_multicast"] + } + } + }, + "right" : { + "type" : "bool", + "value" : true + } + } + }, + "right" : { + "type" : "expression", + "value" : { + "op" : "==", + "left" : { + "type" : "field", + "value" : ["standard_metadata", "ingress_port"] + }, + "right" : { + "type" : "field", + "value" : ["standard_metadata", "egress_port"] + } + } + } + } + }, + "false_next" : null, + "true_next" : "tbl_act_1" + } + ] + } + ], + "checksums" : [ + { + "name" : "cksum", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 580, + "column" : 8, + "source_fragment" : "update_checksum(hdr.ndp.isValid(), ..." 
+ }, + "target" : ["icmpv6", "checksum"], + "type" : "generic", + "calculation" : "calc", + "verify" : false, + "update" : true, + "if_cond" : { + "type" : "expression", + "value" : { + "op" : "d2b", + "left" : null, + "right" : { + "type" : "field", + "value" : ["ndp", "$valid$"] + } + } + } + } + ], + "force_arith" : [], + "extern_instances" : [], + "field_aliases" : [ + [ + "queueing_metadata.enq_timestamp", + ["standard_metadata", "enq_timestamp"] + ], + [ + "queueing_metadata.enq_qdepth", + ["standard_metadata", "enq_qdepth"] + ], + [ + "queueing_metadata.deq_timedelta", + ["standard_metadata", "deq_timedelta"] + ], + [ + "queueing_metadata.deq_qdepth", + ["standard_metadata", "deq_qdepth"] + ], + [ + "intrinsic_metadata.ingress_global_timestamp", + ["standard_metadata", "ingress_global_timestamp"] + ], + [ + "intrinsic_metadata.egress_global_timestamp", + ["standard_metadata", "egress_global_timestamp"] + ], + [ + "intrinsic_metadata.lf_field_list", + ["standard_metadata", "lf_field_list"] + ], + [ + "intrinsic_metadata.mcast_grp", + ["standard_metadata", "mcast_grp"] + ], + [ + "intrinsic_metadata.resubmit_flag", + ["standard_metadata", "resubmit_flag"] + ], + [ + "intrinsic_metadata.egress_rid", + ["standard_metadata", "egress_rid"] + ], + [ + "intrinsic_metadata.recirculate_flag", + ["standard_metadata", "recirculate_flag"] + ], + [ + "intrinsic_metadata.priority", + ["standard_metadata", "priority"] + ] + ], + "program" : "p4src/main.p4", + "__meta__" : { + "version" : [2, 18], + "compiler" : "https://github.com/p4lang/p4c" + } +} \ No newline at end of file diff --git a/src/device/tests/p4/test-p4info.txt b/src/device/tests/p4/test-p4info.txt new file mode 100644 index 0000000000000000000000000000000000000000..6382852ad8f597786003252184153b526c66fb9e --- /dev/null +++ b/src/device/tests/p4/test-p4info.txt @@ -0,0 +1,245 @@ +pkg_info { + arch: "v1model" +} +tables { + preamble { + id: 33605373 + name: "IngressPipeImpl.l2_exact_table" + alias: "l2_exact_table" + } + match_fields { + id: 1 + name: "hdr.ethernet.dst_addr" + bitwidth: 48 + match_type: EXACT + } + action_refs { + id: 16812802 + } + action_refs { + id: 16796182 + annotations: "@defaultonly" + scope: DEFAULT_ONLY + } + const_default_action_id: 16796182 + direct_resource_ids: 318813612 + size: 1024 +} +tables { + preamble { + id: 33573501 + name: "IngressPipeImpl.l2_ternary_table" + alias: "l2_ternary_table" + } + match_fields { + id: 1 + name: "hdr.ethernet.dst_addr" + bitwidth: 48 + match_type: TERNARY + } + action_refs { + id: 16841371 + } + action_refs { + id: 16796182 + annotations: "@defaultonly" + scope: DEFAULT_ONLY + } + const_default_action_id: 16796182 + direct_resource_ids: 318768597 + size: 1024 +} +tables { + preamble { + id: 33557865 + name: "IngressPipeImpl.acl_table" + alias: "acl_table" + } + match_fields { + id: 1 + name: "standard_metadata.ingress_port" + bitwidth: 9 + match_type: TERNARY + } + match_fields { + id: 2 + name: "hdr.ethernet.dst_addr" + bitwidth: 48 + match_type: TERNARY + } + match_fields { + id: 3 + name: "hdr.ethernet.src_addr" + bitwidth: 48 + match_type: TERNARY + } + match_fields { + id: 4 + name: "hdr.ethernet.ether_type" + bitwidth: 16 + match_type: TERNARY + } + match_fields { + id: 5 + name: "local_metadata.ip_proto" + bitwidth: 8 + match_type: TERNARY + } + match_fields { + id: 6 + name: "local_metadata.icmp_type" + bitwidth: 8 + match_type: TERNARY + } + match_fields { + id: 7 + name: "local_metadata.l4_src_port" + bitwidth: 16 + match_type: TERNARY + } + match_fields { + id: 
8 + name: "local_metadata.l4_dst_port" + bitwidth: 16 + match_type: TERNARY + } + action_refs { + id: 16833331 + } + action_refs { + id: 16782152 + } + action_refs { + id: 16796182 + } + action_refs { + id: 16800567 + annotations: "@defaultonly" + scope: DEFAULT_ONLY + } + direct_resource_ids: 318773822 + size: 1024 +} +actions { + preamble { + id: 16800567 + name: "NoAction" + alias: "NoAction" + } +} +actions { + preamble { + id: 16796182 + name: "IngressPipeImpl.drop" + alias: "drop" + } +} +actions { + preamble { + id: 16812802 + name: "IngressPipeImpl.set_egress_port" + alias: "set_egress_port" + } + params { + id: 1 + name: "port_num" + bitwidth: 9 + } +} +actions { + preamble { + id: 16841371 + name: "IngressPipeImpl.set_multicast_group" + alias: "set_multicast_group" + } + params { + id: 1 + name: "gid" + bitwidth: 16 + } +} +actions { + preamble { + id: 16833331 + name: "IngressPipeImpl.send_to_cpu" + alias: "send_to_cpu" + } +} +actions { + preamble { + id: 16782152 + name: "IngressPipeImpl.clone_to_cpu" + alias: "clone_to_cpu" + } +} +direct_counters { + preamble { + id: 318813612 + name: "l2_exact_table_counter" + alias: "l2_exact_table_counter" + } + spec { + unit: BOTH + } + direct_table_id: 33605373 +} +direct_counters { + preamble { + id: 318768597 + name: "l2_ternary_table_counter" + alias: "l2_ternary_table_counter" + } + spec { + unit: BOTH + } + direct_table_id: 33573501 +} +direct_counters { + preamble { + id: 318773822 + name: "acl_table_counter" + alias: "acl_table_counter" + } + spec { + unit: BOTH + } + direct_table_id: 33557865 +} +controller_packet_metadata { + preamble { + id: 67132047 + name: "packet_in" + alias: "packet_in" + annotations: "@controller_header(\"packet_in\")" + } + metadata { + id: 1 + name: "ingress_port" + bitwidth: 9 + } + metadata { + id: 2 + name: "_pad" + bitwidth: 7 + } +} +controller_packet_metadata { + preamble { + id: 67111875 + name: "packet_out" + alias: "packet_out" + annotations: "@controller_header(\"packet_out\")" + } + metadata { + id: 1 + name: "egress_port" + bitwidth: 9 + } + metadata { + id: 2 + name: "_pad" + bitwidth: 7 + } +} +type_info { +} diff --git a/src/device/tests/test_internal_p4.py b/src/device/tests/test_internal_p4.py new file mode 100644 index 0000000000000000000000000000000000000000..4907e538843dfa5d9c7833b4d02f05e483720510 --- /dev/null +++ b/src/device/tests/test_internal_p4.py @@ -0,0 +1,252 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Internal P4 driver tests. 
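To make the link between the high-level `'table'` config rule in `device_p4.py` and the P4Info above concrete, the sketch below shows roughly the low-level `p4runtime_pb2.TableEntry` that corresponds to the ACL clone-to-CPU rule. The driver's actual translation code is not part of this diff, so treat the message layout as the standard `p4runtime.proto` one rather than this driver's internals; the numeric IDs are copied from `test-p4info.txt`.

```python
from p4.v1 import p4runtime_pb2
from device.service.drivers.p4.p4_common import encode_mac

# IDs taken from test-p4info.txt above.
entry = p4runtime_pb2.TableEntry()
entry.table_id = 33557865                  # IngressPipeImpl.acl_table
entry.priority = 1

m = entry.match.add()
m.field_id = 2                             # hdr.ethernet.dst_addr (ternary, 48 bits)
m.ternary.value = encode_mac('aa:bb:cc:dd:ee:22')
m.ternary.mask = encode_mac('ff:ff:ff:ff:ff:ff')

entry.action.action.action_id = 16782152   # IngressPipeImpl.clone_to_cpu
```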
+""" + +import pytest +from device.service.drivers.p4.p4_driver import P4Driver +from device.service.drivers.p4.p4_common import ( + matches_mac, encode_mac, decode_mac, encode, + matches_ipv4, encode_ipv4, decode_ipv4, + matches_ipv6, encode_ipv6, decode_ipv6, + encode_num, decode_num +) +from .device_p4 import( + DEVICE_P4_IP_ADDR, DEVICE_P4_PORT, DEVICE_P4_DPID, DEVICE_P4_NAME, + DEVICE_P4_VENDOR, DEVICE_P4_HW_VER, DEVICE_P4_SW_VER, + DEVICE_P4_WORKERS, DEVICE_P4_GRACE_PERIOD, + DEVICE_P4_CONFIG_TABLE_ENTRY, DEVICE_P4_DECONFIG_TABLE_ENTRY) +from .mock_p4runtime_service import MockP4RuntimeService + + +@pytest.fixture(scope='session') +def p4runtime_service(): + """ + Spawn a mock P4Runtime server. + + :return: void + """ + _service = MockP4RuntimeService( + address=DEVICE_P4_IP_ADDR, port=DEVICE_P4_PORT, + max_workers=DEVICE_P4_WORKERS, + grace_period=DEVICE_P4_GRACE_PERIOD) + _service.start() + yield _service + _service.stop() + + +@pytest.fixture(scope='session') +def device_driverapi_p4(): + """ + Invoke an instance of the P4 driver. + + :return: void + """ + _driver = P4Driver( + address=DEVICE_P4_IP_ADDR, + port=DEVICE_P4_PORT, + id=DEVICE_P4_DPID, + name=DEVICE_P4_NAME, + vendor=DEVICE_P4_VENDOR, + hw_ver=DEVICE_P4_HW_VER, + sw_ver=DEVICE_P4_SW_VER) + _driver.Connect() + yield _driver + _driver.Disconnect() + + +def test_device_driverapi_p4_setconfig( + p4runtime_service: MockP4RuntimeService, + device_driverapi_p4: P4Driver): + """ + Test the SetConfig RPC of the P4 driver API. + + :param p4runtime_service: Mock P4Runtime service + :param device_driverapi_p4: instance of the P4 device driver + :return: void + """ + result = device_driverapi_p4.SetConfig( + DEVICE_P4_CONFIG_TABLE_ENTRY + ) + assert list(result) + + +def test_device_driverapi_p4_getconfig( + p4runtime_service: MockP4RuntimeService, + device_driverapi_p4: P4Driver): + """ + Test the GetConfig RPC of the P4 driver API. + + :param p4runtime_service: Mock P4Runtime service + :param device_driverapi_p4: instance of the P4 device driver + :return: void + """ + pytest.skip('Skipping test: GetConfig') + + +def test_device_driverapi_p4_getresource( + p4runtime_service: MockP4RuntimeService, + device_driverapi_p4: P4Driver): + """ + Test the GetResource RPC of the P4 driver API. + + :param p4runtime_service: Mock P4Runtime service + :param device_driverapi_p4: instance of the P4 device driver + :return: void + """ + pytest.skip('Skipping test: GetResource') + + +def test_device_driverapi_p4_deleteconfig( + p4runtime_service: MockP4RuntimeService, + device_driverapi_p4: P4Driver): + """ + Test the DeleteConfig RPC of the P4 driver API. + + :param p4runtime_service: Mock P4Runtime service + :param device_driverapi_p4: instance of the P4 device driver + :return: void + """ + result = device_driverapi_p4.DeleteConfig( + DEVICE_P4_DECONFIG_TABLE_ENTRY + ) + assert list(result) + + +def test_device_driverapi_p4_subscribe_state( + p4runtime_service: MockP4RuntimeService, + device_driverapi_p4: P4Driver): + """ + Test the SubscribeState RPC of the P4 driver API. + + :param p4runtime_service: Mock P4Runtime service + :param device_driverapi_p4: instance of the P4 device driver + :return: void + """ + pytest.skip('Skipping test: SubscribeState') + + +def test_device_driverapi_p4_getstate( + p4runtime_service: MockP4RuntimeService, + device_driverapi_p4: P4Driver): + """ + Test the GetState RPC of the P4 driver API. 
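The fixtures and tests above already capture the intended call sequence; the sketch below simply flattens it into a plain script, e.g. for ad-hoc experiments against the mock P4Runtime server or a real Stratum/BMv2 target. The absolute import path for the test constants (`device.tests.device_p4`) is an assumption, since the test module itself uses relative imports.

```python
# Hypothetical ad-hoc script mirroring the pytest fixtures above.
from device.service.drivers.p4.p4_driver import P4Driver
from device.tests.device_p4 import (
    DEVICE_P4_IP_ADDR, DEVICE_P4_PORT, DEVICE_P4_DPID, DEVICE_P4_NAME,
    DEVICE_P4_VENDOR, DEVICE_P4_HW_VER, DEVICE_P4_SW_VER,
    DEVICE_P4_CONFIG_TABLE_ENTRY, DEVICE_P4_DECONFIG_TABLE_ENTRY)

driver = P4Driver(
    address=DEVICE_P4_IP_ADDR, port=DEVICE_P4_PORT, id=DEVICE_P4_DPID,
    name=DEVICE_P4_NAME, vendor=DEVICE_P4_VENDOR,
    hw_ver=DEVICE_P4_HW_VER, sw_ver=DEVICE_P4_SW_VER)

driver.Connect()
try:
    # Install and then remove the ACL clone-to-CPU entry defined in device_p4.py.
    print(list(driver.SetConfig(DEVICE_P4_CONFIG_TABLE_ENTRY)))
    print(list(driver.DeleteConfig(DEVICE_P4_DECONFIG_TABLE_ENTRY)))
finally:
    driver.Disconnect()
```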
+ + :param p4runtime_service: Mock P4Runtime service + :param device_driverapi_p4: instance of the P4 device driver + :return: void + """ + pytest.skip('Skipping test: GetState') + + +def test_device_driverapi_p4_unsubscribe_state( + p4runtime_service: MockP4RuntimeService, + device_driverapi_p4: P4Driver): + """ + Test the UnsubscribeState RPC of the P4 driver API. + + :param p4runtime_service: Mock P4Runtime service + :param device_driverapi_p4: instance of the P4 device driver + :return: void + """ + pytest.skip('Skipping test: UnsubscribeState') + + +def test_p4_common_mac(): + """ + Test MAC converters. + + :return: void + """ + wrong_mac = "aa:bb:cc:dd:ee" + assert not matches_mac(wrong_mac) + + mac = "aa:bb:cc:dd:ee:fe" + assert matches_mac(mac) + enc_mac = encode_mac(mac) + assert enc_mac == b'\xaa\xbb\xcc\xdd\xee\xfe',\ + "String-based MAC address to bytes failed" + enc_mac = encode(mac, 6*8) + assert enc_mac == b'\xaa\xbb\xcc\xdd\xee\xfe',\ + "String-based MAC address to bytes failed" + dec_mac = decode_mac(enc_mac) + assert mac == dec_mac,\ + "MAC address bytes to string failed" + + +def test_p4_common_ipv4(): + """ + Test IPv4 converters. + + :return: void + """ + assert not matches_ipv4("10.0.0.1.5") + assert not matches_ipv4("256.0.0.1") + assert not matches_ipv4("256.0.1") + assert not matches_ipv4("10001") + + ipv4 = "10.0.0.1" + assert matches_ipv4(ipv4) + enc_ipv4 = encode_ipv4(ipv4) + assert enc_ipv4 == b'\x0a\x00\x00\x01',\ + "String-based IPv4 address to bytes failed" + dec_ipv4 = decode_ipv4(enc_ipv4) + assert ipv4 == dec_ipv4,\ + "IPv4 address bytes to string failed" + + +def test_p4_common_ipv6(): + """ + Test IPv6 converters. + + :return: void + """ + assert not matches_ipv6('10.0.0.1') + assert matches_ipv6('2001:0000:85a3::8a2e:370:1111') + + ipv6 = "1:2:3:4:5:6:7:8" + assert matches_ipv6(ipv6) + enc_ipv6 = encode_ipv6(ipv6) + assert enc_ipv6 == \ + b'\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\x07\x00\x08',\ + "String-based IPv6 address to bytes failed" + dec_ipv6 = decode_ipv6(enc_ipv6) + assert ipv6 == dec_ipv6,\ + "IPv6 address bytes to string failed" + + +def test_p4_common_numbers(): + """ + Test numerical converters. + + :return: void + """ + num = 1337 + byte_len = 5 + enc_num = encode_num(num, byte_len * 8) + assert enc_num == b'\x00\x00\x00\x05\x39',\ + "Number to bytes conversion failed" + dec_num = decode_num(enc_num) + assert num == dec_num,\ + "Bytes to number conversion failed" + assert encode((num,), byte_len * 8) == enc_num + assert encode([num], byte_len * 8) == enc_num + + num = 256 + try: + encode_num(num, 8) + except OverflowError: + pass diff --git a/src/device/tests/test_unit_p4.py b/src/device/tests/test_unit_p4.py deleted file mode 100644 index 777ab280aa2b500c3c2b445fcecdf81024b817f3..0000000000000000000000000000000000000000 --- a/src/device/tests/test_unit_p4.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pytest -from device.service.drivers.p4.p4_driver import P4Driver -from .device_p4 import( - DEVICE_P4_ADDRESS, DEVICE_P4_PORT, DEVICE_P4_DPID, DEVICE_P4_NAME, - DEVICE_P4_VENDOR, DEVICE_P4_HW_VER, DEVICE_P4_SW_VER, - DEVICE_P4_PIPECONF, DEVICE_P4_WORKERS, DEVICE_P4_GRACE_PERIOD) -from .mock_p4runtime_service import MockP4RuntimeService - - -@pytest.fixture(scope='session') -def p4runtime_service(): - _service = MockP4RuntimeService( - address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT, - max_workers=DEVICE_P4_WORKERS, - grace_period=DEVICE_P4_GRACE_PERIOD) - _service.start() - yield _service - _service.stop() - - -@pytest.fixture(scope='session') -def device_driverapi_p4(): - _driver = P4Driver( - address=DEVICE_P4_ADDRESS, - port=DEVICE_P4_PORT, - id=DEVICE_P4_DPID, - name=DEVICE_P4_NAME, - vendor=DEVICE_P4_VENDOR, - hw_ver=DEVICE_P4_HW_VER, - sw_ver=DEVICE_P4_SW_VER, - pipeconf=DEVICE_P4_PIPECONF) - _driver.Connect() - yield _driver - _driver.Disconnect() - - -def test_device_driverapi_p4_setconfig( - p4runtime_service: MockP4RuntimeService, - device_driverapi_p4: P4Driver): # pylint: disable=redefined-outer-name - device_driverapi_p4.SetConfig([]) - return - - -def test_device_driverapi_p4_getconfig( - p4runtime_service: MockP4RuntimeService, - device_driverapi_p4: P4Driver): # pylint: disable=redefined-outer-name - device_driverapi_p4.GetConfig() - return - - -def test_device_driverapi_p4_getresource( - p4runtime_service: MockP4RuntimeService, - device_driverapi_p4: P4Driver): # pylint: disable=redefined-outer-name - device_driverapi_p4.GetResource("") - return - - -def test_device_driverapi_p4_getstate( - p4runtime_service: MockP4RuntimeService, - device_driverapi_p4: P4Driver): # pylint: disable=redefined-outer-name - device_driverapi_p4.GetState() - return - - -def test_device_driverapi_p4_deleteconfig( - p4runtime_service: MockP4RuntimeService, - device_driverapi_p4: P4Driver): # pylint: disable=redefined-outer-name - device_driverapi_p4.DeleteConfig([]) - return - - -def test_device_driverapi_p4_subscribe_state( - p4runtime_service: MockP4RuntimeService, - device_driverapi_p4: P4Driver): # pylint: disable=redefined-outer-name - device_driverapi_p4.SubscribeState([]) - return - - -def test_device_driverapi_p4_unsubscribe_state( - p4runtime_service: MockP4RuntimeService, - device_driverapi_p4: P4Driver): # pylint: disable=redefined-outer-name - device_driverapi_p4.UnsubscribeState([]) - return diff --git a/src/device/tests/test_unitary_openconfig.py b/src/device/tests/test_unitary_openconfig.py index 32fb5709a98d095982d46d16450117a84f89f165..6144a95d96bbbfd68213356f06573a2200c11bb1 100644 --- a/src/device/tests/test_unitary_openconfig.py +++ b/src/device/tests/test_unitary_openconfig.py @@ -29,8 +29,12 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment) try: - from .Device_OpenConfig_Infinera1 import( + #from .Device_OpenConfig_Infinera1 import( #from .Device_OpenConfig_Infinera2 import( + #from .Device_OpenConfig_Adva import( + #from .Device_OpenConfig_Adva_149 import( + from .Device_OpenConfig_Adva_155 import( + #from .Device_OpenConfig_Cisco import( DEVICE_OC, DEVICE_OC_CONFIG_RULES, DEVICE_OC_DECONFIG_RULES, DEVICE_OC_CONNECT_RULES, DEVICE_OC_ID, DEVICE_OC_UUID) ENABLE_OPENCONFIG = True @@ -38,10 +42,9 @@ except ImportError: ENABLE_OPENCONFIG = False ENABLE_OPENCONFIG_CONFIGURE = True -ENABLE_OPENCONFIG_MONITOR = True +ENABLE_OPENCONFIG_MONITOR = 
False ENABLE_OPENCONFIG_DECONFIGURE = True - logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING) logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) logging.getLogger('monitoring-client').setLevel(logging.WARNING) diff --git a/src/device/tests/test_unitary_p4.py b/src/device/tests/test_unitary_p4.py index 86a669bd40deb8f7839d3e682b8a1f52f3c38e1b..43313caff33d646918b9be23c87e499185714a2c 100644 --- a/src/device/tests/test_unitary_p4.py +++ b/src/device/tests/test_unitary_p4.py @@ -12,22 +12,34 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, grpc, logging, pytest -from common.proto.context_pb2 import Device, DeviceId +""" +P4 unit tests. +""" + +import copy +import logging +import operator +import grpc +import pytest +from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId,\ + DeviceOperationalStatusEnum from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from device.service.DeviceService import DeviceService from device.service.driver_api._Driver import _Driver -from .PrepareTestScenario import ( # pylint: disable=unused-import +from .PrepareTestScenario import ( # pylint: disable=unused-import # be careful, order of symbols is important here! - mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment) + mock_service, device_service, context_client, device_client, + monitoring_client, test_prepare_environment) from .mock_p4runtime_service import MockP4RuntimeService try: from .device_p4 import( - DEVICE_P4, DEVICE_P4_ID, DEVICE_P4_UUID, DEVICE_P4_ADDRESS, DEVICE_P4_PORT, DEVICE_P4_WORKERS, - DEVICE_P4_GRACE_PERIOD, DEVICE_P4_CONNECT_RULES, DEVICE_P4_CONFIG_RULES) + DEVICE_P4, DEVICE_P4_ID, DEVICE_P4_UUID, + DEVICE_P4_IP_ADDR, DEVICE_P4_PORT, DEVICE_P4_WORKERS, + DEVICE_P4_GRACE_PERIOD, DEVICE_P4_CONNECT_RULES, + DEVICE_P4_CONFIG_TABLE_ENTRY, DEVICE_P4_DECONFIG_TABLE_ENTRY) ENABLE_P4 = True except ImportError: ENABLE_P4 = False @@ -35,10 +47,17 @@ except ImportError: LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) + @pytest.fixture(scope='session') def p4runtime_service(): + """ + Spawn a mock P4Runtime server. + + :return: void + """ _service = MockP4RuntimeService( - address=DEVICE_P4_ADDRESS, port=DEVICE_P4_PORT, + address=DEVICE_P4_IP_ADDR, + port=DEVICE_P4_PORT, max_workers=DEVICE_P4_WORKERS, grace_period=DEVICE_P4_GRACE_PERIOD) _service.start() @@ -47,27 +66,35 @@ def p4runtime_service(): # ----- Test Device Driver P4 -------------------------------------------------- - def test_device_p4_add_error_cases( context_client: ContextClient, # pylint: disable=redefined-outer-name device_client: DeviceClient, # pylint: disable=redefined-outer-name device_service: DeviceService): # pylint: disable=redefined-outer-name + """ + Test AddDevice RPC with wrong inputs. 
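+
+    Adding a device whose config_rules include anything other than
+    "_connect/" rules must be rejected with INVALID_ARGUMENT.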
- if not ENABLE_P4: pytest.skip( - 'Skipping test: No P4 device has been configured') + :param context_client: context component client + :param device_client: device component client + :param device_service: device component service + :return: + """ - with pytest.raises(grpc.RpcError) as e: + if not ENABLE_P4: + pytest.skip('Skipping test: No P4 device has been configured') + + with pytest.raises(grpc.RpcError) as ex: device_p4_with_extra_rules = copy.deepcopy(DEVICE_P4) device_p4_with_extra_rules['device_config']['config_rules'].extend( DEVICE_P4_CONNECT_RULES) device_p4_with_extra_rules['device_config']['config_rules'].extend( - DEVICE_P4_CONFIG_RULES) + DEVICE_P4_CONFIG_TABLE_ENTRY) device_client.AddDevice(Device(**device_p4_with_extra_rules)) - assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT + assert ex.value.code() == grpc.StatusCode.INVALID_ARGUMENT msg_head = 'device.device_config.config_rules([' - msg_tail = ']) is invalid; RPC method AddDevice only accepts connection Config Rules that should start '\ - 'with "_connect/" tag. Others should be configured after adding the device.' - except_msg = str(e.value.details()) + msg_tail = ']) is invalid; RPC method AddDevice only accepts connection '\ + 'Config Rules that should start with "_connect/" tag. '\ + 'Others should be configured after adding the device.' + except_msg = str(ex.value.details()) assert except_msg.startswith(msg_head) and except_msg.endswith(msg_tail) @@ -76,35 +103,67 @@ def test_device_p4_add_correct( device_client: DeviceClient, # pylint: disable=redefined-outer-name device_service: DeviceService, # pylint: disable=redefined-outer-name p4runtime_service: MockP4RuntimeService): # pylint: disable=redefined-outer-name + """ + Test AddDevice RPC with correct inputs. + + :param context_client: context component client + :param device_client: device component client + :param device_service: device component service + :param p4runtime_service: Mock P4Runtime service + :return: + """ - if not ENABLE_P4: pytest.skip( - 'Skipping test: No P4 device has been configured') + if not ENABLE_P4: + pytest.skip('Skipping test: No P4 device has been configured') device_p4_with_connect_rules = copy.deepcopy(DEVICE_P4) device_p4_with_connect_rules['device_config']['config_rules'].extend( DEVICE_P4_CONNECT_RULES) device_client.AddDevice(Device(**device_p4_with_connect_rules)) driver_instance_cache = device_service.device_servicer.driver_instance_cache - driver : _Driver = driver_instance_cache.get(DEVICE_P4_UUID) + driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID) assert driver is not None + device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID)) + config_rules = [ + ( + ConfigActionEnum.Name(config_rule.action), + config_rule.custom.resource_key, + config_rule.custom.resource_value + ) + for config_rule in device_data.device_config.config_rules + if config_rule.WhichOneof('config_rule') == 'custom' + ] + LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format( + '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) + for config_rule in config_rules]))) + def test_device_p4_get( context_client: ContextClient, # pylint: disable=redefined-outer-name device_client: DeviceClient, # pylint: disable=redefined-outer-name device_service: DeviceService, # pylint: disable=redefined-outer-name p4runtime_service: MockP4RuntimeService): # pylint: disable=redefined-outer-name + """ + Test GetDevice RPC. 
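+
+    A freshly added P4 device must expose an empty initial configuration.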
+ + :param context_client: context component client + :param device_client: device component client + :param device_service: device component service + :param p4runtime_service: Mock P4Runtime service + :return: + """ - if not ENABLE_P4: pytest.skip( - 'Skipping test: No P4 device has been configured') + if not ENABLE_P4: + pytest.skip('Skipping test: No P4 device has been configured') initial_config = device_client.GetInitialConfig(DeviceId(**DEVICE_P4_ID)) - LOGGER.info('initial_config = {:s}'.format( - grpc_message_to_json_string(initial_config))) + assert len(initial_config.config_rules) == 0 + LOGGER.info('initial_config = %s', + grpc_message_to_json_string(initial_config)) device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID)) - LOGGER.info('device_data = {:s}'.format( - grpc_message_to_json_string(device_data))) + LOGGER.info('device_data = %s', grpc_message_to_json_string(device_data)) def test_device_p4_configure( @@ -112,11 +171,58 @@ def test_device_p4_configure( device_client: DeviceClient, # pylint: disable=redefined-outer-name device_service: DeviceService, # pylint: disable=redefined-outer-name p4runtime_service: MockP4RuntimeService): # pylint: disable=redefined-outer-name + """ + Test ConfigureDevice RPC. - if not ENABLE_P4: pytest.skip( - 'Skipping test: No P4 device has been configured') + :param context_client: context component client + :param device_client: device component client + :param device_service: device component service + :param p4runtime_service: Mock P4Runtime service + :return: + """ - pytest.skip('Skipping test for unimplemented method') + if not ENABLE_P4: + pytest.skip('Skipping test: No P4 device has been configured') + + driver_instance_cache = device_service.device_servicer.driver_instance_cache + driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID) + assert driver is not None + + # No entries should exist at this point in time + driver_config = sorted(driver.GetConfig(), key=operator.itemgetter(0)) + assert len(driver_config) == len(driver.get_manager().get_resource_keys()) + assert driver.get_manager().count_active_entries() == 0 + + # Flip the operational status and check it is correctly flipped in Context + device_p4_with_operational_status = copy.deepcopy(DEVICE_P4) + device_p4_with_operational_status['device_operational_status'] = \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + device_client.ConfigureDevice(Device(**device_p4_with_operational_status)) + device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID)) + assert device_data.device_operational_status == \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + # Insert a new table entry + device_p4_with_config_rules = copy.deepcopy(DEVICE_P4) + device_p4_with_config_rules['device_config']['config_rules'].extend( + DEVICE_P4_CONFIG_TABLE_ENTRY) + device_client.ConfigureDevice(Device(**device_p4_with_config_rules)) + + device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID)) + config_rules = [ + (ConfigActionEnum.Name(config_rule.action), + config_rule.custom.resource_key, + config_rule.custom.resource_value) + for config_rule in device_data.device_config.config_rules + if config_rule.WhichOneof('config_rule') == 'custom' + ] + LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format( + '\n'.join( + ['{:s} {:s} = {:s}'.format(*config_rule) + for config_rule in config_rules])) + ) + for config_rule in DEVICE_P4_CONFIG_TABLE_ENTRY: + assert 'custom' in config_rule def test_device_p4_deconfigure( @@ -124,11 +230,53 @@ def 
test_device_p4_deconfigure( device_client: DeviceClient, # pylint: disable=redefined-outer-name device_service: DeviceService, # pylint: disable=redefined-outer-name p4runtime_service: MockP4RuntimeService): # pylint: disable=redefined-outer-name + """ + Test DeconfigureDevice RPC. - if not ENABLE_P4: pytest.skip( - 'Skipping test: No P4 device has been configured') + :param context_client: context component client + :param device_client: device component client + :param device_service: device component service + :param p4runtime_service: Mock P4Runtime service + :return: + """ - pytest.skip('Skipping test for unimplemented method') + if not ENABLE_P4: + pytest.skip('Skipping test: No P4 device has been configured') + + driver_instance_cache = device_service.device_servicer.driver_instance_cache + driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID) + assert driver is not None + + # Delete a table entry + device_p4_with_config_rules = copy.deepcopy(DEVICE_P4) + device_p4_with_config_rules['device_config']['config_rules'].extend( + DEVICE_P4_DECONFIG_TABLE_ENTRY) + device_client.ConfigureDevice(Device(**device_p4_with_config_rules)) + + device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID)) + config_rules = [ + (ConfigActionEnum.Name(config_rule.action), + config_rule.custom.resource_key, + config_rule.custom.resource_value) + for config_rule in device_data.device_config.config_rules + if config_rule.WhichOneof('config_rule') == 'custom' + ] + LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format( + '\n'.join( + ['{:s} {:s} = {:s}'.format(*config_rule) + for config_rule in config_rules])) + ) + for config_rule in DEVICE_P4_CONFIG_TABLE_ENTRY: + assert 'custom' in config_rule + + # Flip the operational status and check it is correctly flipped in Context + device_p4_with_operational_status = copy.deepcopy(DEVICE_P4) + device_p4_with_operational_status['device_operational_status'] = \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + device_client.ConfigureDevice(Device(**device_p4_with_operational_status)) + device_data = context_client.GetDevice(DeviceId(**DEVICE_P4_ID)) + assert device_data.device_operational_status == \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED def test_device_p4_delete( @@ -136,10 +284,20 @@ def test_device_p4_delete( device_client: DeviceClient, # pylint: disable=redefined-outer-name device_service: DeviceService, # pylint: disable=redefined-outer-name p4runtime_service: MockP4RuntimeService): # pylint: disable=redefined-outer-name + """ + Test DeleteDevice RPC. 
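+
+    After deletion, the driver instance must be evicted from the driver
+    instance cache.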
+ + :param context_client: context component client + :param device_client: device component client + :param device_service: device component service + :param p4runtime_service: Mock P4Runtime service + :return: + """ - if not ENABLE_P4: pytest.skip('Skipping test: No P4 device has been configured') + if not ENABLE_P4: + pytest.skip('Skipping test: No P4 device has been configured') device_client.DeleteDevice(DeviceId(**DEVICE_P4_ID)) driver_instance_cache = device_service.device_servicer.driver_instance_cache - driver : _Driver = driver_instance_cache.get(DEVICE_P4_UUID) + driver: _Driver = driver_instance_cache.get(DEVICE_P4_UUID) assert driver is None diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in index c77d9683a2372435779db520f9f4c537d5e012b0..95953f100c448557471d112cd1e5e8a072320b30 100644 --- a/src/monitoring/requirements.in +++ b/src/monitoring/requirements.in @@ -5,10 +5,10 @@ fastcache==1.1.0 #opencensus[stackdriver] #google-cloud-profiler #numpy -Jinja2==3.0.3 -ncclient==0.6.13 -p4runtime==1.3.0 -paramiko==2.9.2 +#Jinja2==3.0.3 +#ncclient==0.6.13 +#p4runtime==1.3.0 +#paramiko==2.9.2 influx-line-protocol==0.1.4 python-dateutil==2.8.2 python-json-logger==2.0.2 diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index f8b4d59bdf61b6897da36258496eb5be7faaf8a2..45f55cb05f8c0cb5c9cdb2b3f02ed70c80285009 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -32,8 +32,11 @@ from device.client.DeviceClient import DeviceClient from device.service.DeviceService import DeviceService from device.service.driver_api.DriverFactory import DriverFactory from device.service.driver_api.DriverInstanceCache import DriverInstanceCache -from device.service.drivers import DRIVERS +os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' +from device.service.drivers import DRIVERS # pylint: disable=wrong-import-position + +# pylint: disable=wrong-import-position from monitoring.client.MonitoringClient import MonitoringClient from common.proto import context_pb2, monitoring_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType diff --git a/src/policy/README.md b/src/policy/README.md index 4268343577871cf98b9f701a32cd8a1ff4d9a72a..a5ce13c6e68197be7909472c7ff03862909ec059 100644 --- a/src/policy/README.md +++ b/src/policy/README.md @@ -1,19 +1,77 @@ -# Policy Management TeraFlow OS service +# TeraFlowSDN Policy Management service -The Policy Management service is tested on Ubuntu 20.04. Follow the instructions below to build, test, and run this service on your local environment. +This repository hosts the TeraFlowSDN Policy Management service. +Follow the instructions below to build, test, and run this service on your local environment. -## Compile code +## TeraFlowSDN Policy Management service architecture -` +The TeraFlowSDN Policy Management service architecture consists of ten (10) interfaces listed below: + +Interfaces | +|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 1. The `PolicyGateway` interface that implements all the RPC functions that are described in `policy.proto` file. | +| 2. The `MonitoringGateway` interface that communicates with a `Monitoring` service gRPC client to invoke key RPC functions described in `monitoring.proto` file. | +| 3. 
The `ContextGateway` interface that communicates with a `Context` service gRPC client to invoke key RPC functions described in `context.proto` file. |
+| 4. The `ServiceGateway` interface that communicates with a `Service` service gRPC client to invoke key RPC functions described in `service.proto` file. |
+| 5. The `DeviceGateway` interface that communicates with a `Device` service gRPC client to invoke key RPC functions described in `device.proto` file. |
+| 6. The `PolicyService` interface that implements the Policy RPC methods by communicating with a `Monitoring` gRPC client, a `Context` gRPC client, a `Service` gRPC client, and a `Device` gRPC client through the `MonitoringService`, `ContextService`, `ServiceService`, and `DeviceService` interfaces respectively. |
+| 7. The `MonitoringService` interface that implements the `SetKpiAlarm()` and `GetAlarmResponseStream()` methods by communicating with a `Monitoring` gRPC client through the use of the `MonitoringGateway` interface. |
+| 8. The `ContextService` interface that implements the `GetService()`, `GetDevice()`, `GetPolicyRule`, `SetPolicyRule`, and `DeletePolicyRule` methods by communicating with a `Context` gRPC client through the use of the `ContextGateway` interface. |
+| 9. The `ServiceService` interface that implements the `UpdateService()` method by communicating with a `Service` gRPC client through the use of the `ServiceGateway` interface. |
+| 10. The `DeviceService` interface that implements the `ConfigureDevice()` method by communicating with a `Device` gRPC client through the use of the `DeviceGateway` interface. |
+
+## Prerequisites
+
+The TeraFlowSDN Policy Management service is currently tested against Ubuntu 20.04 and Java 11.
+
+To quickly install Java 11 on a Debian-based Linux distro, run:
+
+```bash
+sudo apt-get install openjdk-11-jdk -y
+```
+
+Feel free to try more recent Java versions.
+
+## Compile
+
+```bash
 ./mvnw compile
-`
-## Execute unit tests
+```
 
-`
+## Run tests
+
+```bash
 ./mvnw test
-`
+```
+
 ## Run service
 
-`
+```bash
 ./mvnw quarkus:dev
-`
+```
+
+## Clean
+
+```bash
+./mvnw clean
+```
+
+## Deploying on a Kubernetes cluster
+
+To create the K8s manifest file under `target/kubernetes/kubernetes.yml`, which will be used for the deployment, run:
+
+```bash
+./mvnw clean package -DskipUTs -DskipITs
+```
+
+To deploy the application in a K8s cluster, run:
+
+```bash
+kubectl apply -f "manifests/policyservice.yaml"
+```
+
+## Maintainers
+
+This TeraFlowSDN service is implemented by [UBITECH](https://www.ubitech.eu).
+
+Feel free to contact Georgios Katsikas (gkatsikas at ubitech dot eu) in case you have questions.
diff --git a/src/service/service/service_handler_api/_ServiceHandler.py b/src/service/service/service_handler_api/_ServiceHandler.py
index 170e842cdc7d3e0a0ef5bf7e58e8042d6e956e4c..9cbe3f49e8594badf3b419b24154cb59a30a17bf 100644
--- a/src/service/service/service_handler_api/_ServiceHandler.py
+++ b/src/service/service/service_handler_api/_ServiceHandler.py
@@ -19,10 +19,12 @@ from device.client.DeviceClient import DeviceClient
 from service.service.database.ServiceModel import ServiceModel
 
 class _ServiceHandler:
-    def __init__(
-        self, db_service : ServiceModel, database : Database, context_client : ContextClient,
-        device_client : DeviceClient, **settings
-    ) -> None:
+    def __init__(self,
+                 db_service: ServiceModel,
+                 database: Database,
+                 context_client: ContextClient,
+                 device_client: DeviceClient,
+                 **settings) -> None:
         """ Initialize Driver.
            Parameters:
                db_service
@@ -30,102 +32,129 @@ class _ServiceHandler:
                database
                    The instance of the local in-memory database.
                context_client
-                   An instance of context client to be used to retrieve information from the service and the devices.
+                   An instance of context client to be used to retrieve
+                   information from the service and the devices.
                device_client
-                   An instance of device client to be used to configure the devices.
+                   An instance of device client to be used to configure
+                   the devices.
                **settings
                    Extra settings required by the service handler.
        """
        raise NotImplementedError()
 
    def SetEndpoint(
-        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+        self, endpoints : List[Tuple[str, str, Optional[str]]],
+        connection_uuid : Optional[str] = None
    ) -> List[Union[bool, Exception]]:
-        """ Set endpoints from a list.
+        """ Create/Update service endpoints from a list.
            Parameters:
-               endpoints : List[Tuple[str, str, Optional[str]]]
-                   List of tuples, each containing a device_uuid, endpoint_uuid and, optionally, the topology_uuid
+               endpoints: List[Tuple[str, str, Optional[str]]]
+                   List of tuples, each containing a device_uuid,
+                   endpoint_uuid and, optionally, the topology_uuid
                    of the endpoint to be added.
                connection_uuid : Optional[str]
                    If specified, is the UUID of the connection this endpoint is associated to.
            Returns:
-               results : List[Union[bool, Exception]]
-                   List of results for endpoint changes requested. Return values must be in the same order than
-                   endpoints requested. If an endpoint is properly added, True must be retrieved; otherwise, the
-                   Exception that is raised during the processing must be retrieved.
+               results: List[Union[bool, Exception]]
+                   List of results for endpoint changes requested.
+                   Return values must be in the same order as the requested
+                   endpoints. If an endpoint is properly added, True must be
+                   returned; otherwise, the Exception that is raised during
+                   the processing must be returned.
        """
        raise NotImplementedError()
 
    def DeleteEndpoint(
-        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+        self, endpoints : List[Tuple[str, str, Optional[str]]],
+        connection_uuid : Optional[str] = None
    ) -> List[Union[bool, Exception]]:
-        """ Delete endpoints form a list.
+        """ Delete service endpoints from a list.
            Parameters:
-               endpoints : List[Tuple[str, str, Optional[str]]]
-                   List of tuples, each containing a device_uuid, endpoint_uuid, and the topology_uuid of the endpoint
+               endpoints: List[Tuple[str, str, Optional[str]]]
+                   List of tuples, each containing a device_uuid,
+                   endpoint_uuid, and the topology_uuid of the endpoint
                    to be removed.
                connection_uuid : Optional[str]
                    If specified, is the UUID of the connection this endpoint is associated to.
            Returns:
-               results : List[Union[bool, Exception]]
-                   List of results for endpoint deletions requested. Return values must be in the same order than
-                   endpoints requested. If an endpoint is properly deleted, True must be retrieved; otherwise, the
-                   Exception that is raised during the processing must be retrieved.
+               results: List[Union[bool, Exception]]
+                   List of results for endpoint deletions requested.
+                   Return values must be in the same order as the requested
+                   endpoints. If an endpoint is properly deleted, True must be
+                   returned; otherwise, the Exception that is raised during
+                   the processing must be returned.
""" raise NotImplementedError() - def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: - """ Create/Update constraints. + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. Parameters: - constraints : List[Tuple[str, Any]] - List of tuples, each containing a constraint_type and the new constraint_value to be set. + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. Returns: - results : List[Union[bool, Exception]] - List of results for constraint changes requested. Return values must be in the same order than - constraints requested. If a constraint is properly set, True must be retrieved; otherwise, the - Exception that is raised during the processing must be retrieved. + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. """ raise NotImplementedError() - def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: - """ Delete constraints. + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. Parameters: - constraints : List[Tuple[str, Any]] - List of tuples, each containing a constraint_type pointing to the constraint to be deleted, and a - constraint_value containing possible additionally required values to locate the constraint to be - removed. + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. Returns: - results : List[Union[bool, Exception]] - List of results for constraint deletions requested. Return values must be in the same order than - constraints requested. If a constraint is properly deleted, True must be retrieved; otherwise, the - Exception that is raised during the processing must be retrieved. + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. """ raise NotImplementedError() - def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: - """ Create/Update configuration for a list of resources. + def SetConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update configuration for a list of service resources. Parameters: - resources : List[Tuple[str, Any]] - List of tuples, each containing a resource_key pointing the resource to be modified, and a - resource_value containing the new value to be set. + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value + containing the new value to be set. Returns: - results : List[Union[bool, Exception]] - List of results for resource key changes requested. Return values must be in the same order than - resource keys requested. 
If a resource is properly set, True must be retrieved; otherwise, the
-                   Exception that is raised during the processing must be retrieved.
+               results: List[Union[bool, Exception]]
+                   List of results for resource key changes requested.
+                   Return values must be in the same order as the requested
+                   resource keys. If a resource is properly set, True must be
+                   returned; otherwise, the Exception that is raised during
+                   the processing must be returned.
        """
        raise NotImplementedError()
 
-    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
-        """ Delete configuration for a list of resources.
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
+        -> List[Union[bool, Exception]]:
+        """ Delete configuration for a list of service resources.
            Parameters:
-               resources : List[Tuple[str, Any]]
-                   List of tuples, each containing a resource_key pointing the resource to be modified, and a
-                   resource_value containing possible additionally required values to locate the value to be removed.
+               resources: List[Tuple[str, Any]]
+                   List of tuples, each containing a resource_key pointing to
+                   the resource to be modified, and a resource_value containing
+                   possible additionally required values to locate the value
+                   to be removed.
            Returns:
-               results : List[Union[bool, Exception]]
-                   List of results for resource key deletions requested. Return values must be in the same order than
-                   resource keys requested. If a resource is properly deleted, True must be retrieved; otherwise, the
-                   Exception that is raised during the processing must be retrieved.
+               results: List[Union[bool, Exception]]
+                   List of results for resource key deletions requested.
+                   Return values must be in the same order as the requested
+                   resource keys. If a resource is properly deleted, True must
+                   be returned; otherwise, the Exception that is raised during
+                   the processing must be returned.
        """
        raise NotImplementedError()
diff --git a/tutorial/3-2-develop-cth.md b/tutorial/3-2-develop-cth.md
index eda70c9e8c411c8cc6a0ed0832f573ca787962ca..1b2a4690a3177628e18a4ca6f77365f515d6dcc5 100644
--- a/tutorial/3-2-develop-cth.md
+++ b/tutorial/3-2-develop-cth.md
@@ -1,5 +1,18 @@
 # 3.2. Development Commands, Tricks, and Hints (WORK IN PROGRESS)
 
+## Building, running, testing and reporting code coverage locally
+
+The project runs a CI/CD pipeline that ensures all tests are run whenever new code is committed to our repository.
+However, committing and waiting for the pipeline to run can take substantial time.
+For this reason, we prepared a script that runs on your local machine, builds the container image, executes the tests inside the image, and reports code coverage.
+
+The script receives one argument: the name of the component whose tests you want to build and run.
+For instance, if you want to build and run the tests of the `compute` component, you can run:
+
+```shell
+scripts/build_run_report_tests_locally.sh compute
+```
+
 ## Items to be addressed: