Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: tfs/controller
Commits on Source (134)
Showing with 989 additions and 251 deletions
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Read deployment settings
########################################################################################################################
# If not already set, set the URL of your local Docker registry where the images will be uploaded to.
# Leave it blank if you do not want to use any Docker registry.
export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""}
#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/"
TFS_COMPONENTS=$1
# If not already set, set the tag you want to use for your images.
export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
# If not already set, set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
# If not already set, set the new Grafana admin password
export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
# Constants
GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
TMP_FOLDER="./tmp"
# Create a tmp folder for files modified during the deployment
TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
mkdir -p $TMP_MANIFESTS_FOLDER
TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
mkdir -p $TMP_LOGS_FOLDER
echo "Deploying component and collecting environment variables..."
ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
for COMPONENT in $TFS_COMPONENTS; do
    echo "Processing '$COMPONENT' component..."
    IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG"
    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
    echo " Building Docker image..."
    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
    elif [ "$COMPONENT" == "pathcomp" ]; then
        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
        docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
        # next command is redundant, but helpful to keep cache updated between rebuilds
        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
    else
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
    fi
    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
        echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
        if [ "$COMPONENT" == "pathcomp" ]; then
            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL-frontend" > "$TAG_LOG"
            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL-backend" > "$TAG_LOG"
            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
            docker push "$IMAGE_URL-frontend" > "$PUSH_LOG"
            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
            docker push "$IMAGE_URL-backend" > "$PUSH_LOG"
        else
            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
            docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
            docker push "$IMAGE_URL" > "$PUSH_LOG"
        fi
    fi
    echo " Adapting '$COMPONENT' manifest file..."
    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
        # Registry is set
        if [ "$COMPONENT" == "pathcomp" ]; then
            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL-frontend#g" "$MANIFEST"
            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL-backend#g" "$MANIFEST"
            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
        else
            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
        fi
    else
        # Registry is not set
        if [ "$COMPONENT" == "pathcomp" ]; then
            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_NAME-frontend#g" "$MANIFEST"
            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_NAME-backend#g" "$MANIFEST"
            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
        else
            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
        fi
    fi
    # TODO: harmonize names of the monitoring component
    echo " Deploying '$COMPONENT' component to Kubernetes..."
    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
    kubectl --namespace $TFS_K8S_NAMESPACE delete -f "$MANIFEST" > "$DEPLOY_LOG"
    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
    kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
    echo " Collecting env-vars for '$COMPONENT' component..."
    SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json)
    if [ -z "${SERVICE_DATA}" ]; then continue; fi
    # Env vars for service's host address
    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
    if [ -z "${SERVICE_HOST}" ]; then continue; fi
    # TODO: remove previous value from file
    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
    # Env vars for service's 'grpc' port (if any)
    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
    if [ -n "${SERVICE_PORT_GRPC}" ]; then
        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
    fi
    # Env vars for service's 'http' port (if any)
    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
    if [ -n "${SERVICE_PORT_HTTP}" ]; then
        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
    fi
    printf "\n"
done
# For now, leave this control here; some component dependencies are not well handled
for COMPONENT in $TFS_COMPONENTS; do
    echo "Waiting for '$COMPONENT' component..."
    kubectl wait --namespace $TFS_K8S_NAMESPACE \
        --for='condition=available' --timeout=300s deployment/${COMPONENT}service
    printf "\n"
done
./show_deploy.sh
echo "Done!"
src/tests/ecoc22/
\ No newline at end of file
@@ -34,7 +34,7 @@ spec:
- containerPort: 2020
env:
- name: LOG_LEVEL
value: "INFO"
value: "DEBUG"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
......
@@ -34,7 +34,7 @@ spec:
- containerPort: 3030
env:
- name: LOG_LEVEL
value: "INFO"
value: "DEBUG"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:3030"]
......
# Set the URL of your local Docker registry where the images will be uploaded to.
export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
# Set the list of components, separated by comas, you want to build images for, and deploy.
# Set the list of components, separated by spaces, you want to build images for, and deploy.
# Supported components are:
# context device automation policy service compute monitoring webui
# interdomain slice pathcomp dlt
# dbscanserving opticalattackmitigator opticalcentralizedattackdetector
# dbscanserving opticalattackmitigator opticalattackdetector
# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
export TFS_COMPONENTS="context device automation service compute monitoring webui"
export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
......
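The settings file above is meant to be sourced before invoking the deployment scripts, so that the exported `TFS_*` variables (registry, component list, image tag, ...) are visible to them. A minimal sketch, assuming the settings file is the repository's `my_deploy.sh` and the main entry point is `./deploy.sh` (both names are assumptions):

```bash
# Hypothetical workflow; file names are assumptions.
source my_deploy.sh   # exports TFS_REGISTRY_IMAGE, TFS_COMPONENTS, TFS_IMAGE_TAG, ...
./deploy.sh           # reads the exported TFS_* variables and deploys each listed component
```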
@@ -188,6 +188,7 @@ message DeviceList {
message DeviceEvent {
Event event = 1;
DeviceId device_id = 2;
DeviceConfig device_config = 3;
}
......
#!/bin/bash
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# Set the URL of your local Docker registry where the images will be uploaded to. Leave it blank if you do not want to
# use any Docker registry.
REGISTRY_IMAGE=""
#REGISTRY_IMAGE="http://my-container-registry.local/"
# Set the list of components you want to build images for, and deploy.
COMPONENTS="context device automation policy service compute monitoring centralizedattackdetector"
# Set the tag you want to use for your images.
IMAGE_TAG="tf-dev"
# Constants
TMP_FOLDER="./tmp"
TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
mkdir -p $TMP_LOGS_FOLDER
for COMPONENT in $COMPONENTS; do
    echo "Processing '$COMPONENT' component..."
    IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
    IMAGE_URL="$REGISTRY_IMAGE/$IMAGE_NAME"
    echo " Building Docker image..."
    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
    if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
    else
        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG"
    fi
    if [ -n "$REGISTRY_IMAGE" ]; then
        echo "Pushing Docker image to '$REGISTRY_IMAGE'..."
        TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
        docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
        PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
        docker push "$IMAGE_URL" > "$PUSH_LOG"
    fi
done
echo "Preparing for running the tests..."
if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
for COMPONENT in $COMPONENTS; do
    IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
    echo " Running tests for $COMPONENT:"
    # docker run options must precede the image name; otherwise they are passed as container arguments
    docker run -it -d --name $COMPONENT --network=teraflowbridge $IMAGE_NAME
    docker exec -it $COMPONENT bash -c "pytest --log-level=DEBUG --verbose $COMPONENT/tests/test_unitary.py"
    docker stop $COMPONENT
done
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
die () {
    echo >&2 "$@"
    exit 1
}
[ "$#" -eq 1 ] || die "component name required but not provided"
COMPONENT_NAME=$1 # parameter
IMAGE_NAME="${COMPONENT_NAME}-local"
IMAGE_TAG="latest"
if docker ps | grep $IMAGE_NAME
then
    docker stop $IMAGE_NAME
fi
if docker network list | grep teraflowbridge
then
    echo "teraflowbridge is already created"
else
    docker network create -d bridge teraflowbridge
fi
docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$COMPONENT_NAME/Dockerfile .
docker run --name $IMAGE_NAME -d -v "${PWD}/src/${COMPONENT_NAME}/tests:/home/${COMPONENT_NAME}/results" --network=teraflowbridge --rm $IMAGE_NAME:$IMAGE_TAG
docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $COMPONENT_NAME/tests/ --junitxml=/home/${COMPONENT_NAME}/results/${COMPONENT_NAME}_report.xml"
PROJECTDIR=`pwd`
cd $PROJECTDIR/src
RCFILE=$PROJECTDIR/coverage/.coveragerc
echo
echo "Coverage report:"
echo "----------------"
docker exec -i $IMAGE_NAME bash -c "coverage report --include='${COMPONENT_NAME}/*' --show-missing"
# docker stop $IMAGE_NAME
docker rm -f $IMAGE_NAME
docker network rm teraflowbridge
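A hypothetical invocation of the script above, assuming it is saved at the repository root as `run_component_tests.sh` (the file name is an assumption); any component with a Dockerfile under `src/<component>/` and a `tests/` folder should work the same way:

```bash
# Hypothetical: build the 'context' component image, run its unit tests, and print coverage
./run_component_tests.sh context
# The JUnit XML report is left on the host at src/context/tests/context_report.xml
# (the component's tests/ folder is bind-mounted into the container as /home/context/results)
```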
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
mkdir -p tmp/exec_logs/$TFS_K8S_NAMESPACE/
rm tmp/exec_logs/$TFS_K8S_NAMESPACE/*
PODS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE --no-headers --output=custom-columns=":metadata.name")
for POD in $PODS; do
    CONTAINERS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE $POD -o jsonpath='{.spec.containers[*].name}')
    for CONTAINER in $CONTAINERS; do
        kubectl --namespace $TFS_K8S_NAMESPACE logs pod/${POD} --container ${CONTAINER} \
            > tmp/exec_logs/$TFS_K8S_NAMESPACE/$POD\_\_$CONTAINER.log
    done
done
@@ -21,4 +21,5 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc
# Run unitary tests and analyze coverage of code at same time
coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
service/tests/test_unitary_task_scheduler.py \
service/tests/test_unitary.py
@@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"}
# Automated steps start here
########################################################################################################################
kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server
kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringserver
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/sliceservice
# Automation TeraFlow OS service
# TeraFlowSDN Automation service
The Automation service, also known as Zero-Touch Provisioning (ZTP), is tested on Ubuntu 20.04. Follow the instructions below to build, test, and run this service on your local environment.
This repository hosts the TeraFlowSDN Automation service, also known as Zero-Touch Provisioning (ZTP) service.
Follow the instructions below to build, test, and run this service on your local environment.
## Automation Teraflow OS service architecture
## TeraFlowSDN Automation service architecture
| The Automation Teraflow OS service architecture consists of six (6) interfaces listed below: |
The TeraFlowSDN Automation architecture consists of six (6) interfaces listed below:
| Interfaces |
|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| 1. The `AutomationGateway` interface that implements all the rpc functions that are described in `automation.proto` file. |
| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client and implements all the rpc functions that are described in `context.proto` file. |
| 3. The `DeviceGateway` interface that communicates with a `Device` Service gRPC client and implements all the rpc functions that are described in `device.proto` file. |
| 4. The `AutomationService` interface that implements the `addDevice()` method by communicating with a `Context` gRPC client & a `Device` gRPC client through the use of `ContextService` interface & `DeviceService` interface respectively. |
| 5. The `ContextService` interface that implements the `getDevice()` & `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface. |
| 6. The `DeviceService` interface that implements the `getInitialConfiguration()` & `configureDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface. |
| 1. The `AutomationGateway` interface that implements all the RPC functions that are described in `automation.proto` file. |
| 2. The `ContextGateway` interface that communicates with a `Context` Service gRPC client to invoke key RPC functions described in `context.proto` file. |
| 3. The `DeviceGateway` interface that communicates with a `Device` Service gRPC client to invoke key RPC functions described in `device.proto` file. |
| 4. The `AutomationService` interface that implements the `addDevice()`, `updateDevice()`, and `deleteDevice()` methods by communicating with a `Context` gRPC client and a `Device` gRPC client through the use of `ContextService` interface and `DeviceService` interface respectively. |
| 5. The `ContextService` interface that implements the `getDevice()` and `getDeviceEvents()` methods by communicating with a `Context` gRPC client through the use of `ContextGateway` interface. |
| 6. The `DeviceService` interface that implements the `getInitialConfiguration()`, `configureDevice()`, and `deleteDevice()` methods by communicating with a `Device` gRPC client through the use of `DeviceGateway` interface. |
## Prerequisites
The Automation service is currently tested against Ubuntu 20.04 and Java 11.
## Run with dev profile
To quickly install Java 11 on a Debian-based Linux distro do:
```bash
./mvnw clean quarkus:dev
sudo apt-get install openjdk-11-jdk -y
```
## Running tests
Feel free to try more recent Java versions.
## Compile
```bash
./mvnw compile
```
## Run tests
```bash
./mvnw test
```
Run unit and functional tests `./mvnw clean test`
## Run service
```bash
./mvnw quarkus:dev
```
## Clean
```bash
./mvnw clean
```
## Deploying on a Kubernetes cluster
@@ -30,10 +59,16 @@ To create the K8s manifest file under `target/kubernetes/kubernetes.yml` to be u
```bash
./mvnw clean package -DskipUTs -DskipITs
```
To deploy the application in a K8s cluster run
```bash
kubectl apply -f "manifests/automationservice.yaml"
```
## Maintainers
This TeraFlowSDN service is implemented by [UBITECH](https://www.ubitech.eu).
Feel free to contact Georgios Katsikas (gkatsikas at ubitech dot eu) in case you have questions.
@@ -217,6 +217,7 @@ public class Serializer {
builder.setDeviceId(deviceId);
builder.setEvent(serialize(deviceEvent.getEvent()));
builder.setDeviceConfig(serialize(deviceEvent.getDeviceConfig().orElse(null)));
return builder.build();
}
@@ -224,8 +225,9 @@ public class Serializer {
public DeviceEvent deserialize(ContextOuterClass.DeviceEvent deviceEvent) {
final var deviceId = deserialize(deviceEvent.getDeviceId());
final var event = deserialize(deviceEvent.getEvent());
final var deviceConfig = deserialize(deviceEvent.getDeviceConfig());
return new DeviceEvent(deviceId, event);
return new DeviceEvent(deviceId, event, deviceConfig);
}
public ContextOuterClass.ConfigActionEnum serialize(ConfigActionEnum configAction) {
......
@@ -16,14 +16,23 @@
package eu.teraflow.automation.context.model;
import java.util.Optional;
public class DeviceEvent {
private final Event event;
private final String deviceId;
private final Optional<DeviceConfig> deviceConfig;
public DeviceEvent(String deviceId, Event event) {
this(deviceId, event, null);
}
public DeviceEvent(String deviceId, Event event, DeviceConfig deviceConfig) {
this.event = event;
this.deviceId = deviceId;
this.deviceConfig =
(deviceConfig == null) ? Optional.empty() : Optional.ofNullable(deviceConfig);
}
public Event getEvent() {
@@ -34,8 +43,14 @@ public class DeviceEvent {
return deviceId;
}
public Optional<DeviceConfig> getDeviceConfig() {
return deviceConfig;
}
@Override
public String toString() {
return String.format("%s[%s, %s]", getClass().getSimpleName(), deviceId, event.toString());
return String.format(
"%s[%s, %s, %s]",
getClass().getSimpleName(), deviceId, event.toString(), deviceConfig.orElse(null));
}
}
@@ -308,14 +308,51 @@ class SerializerTest {
.setTimestamp(expectedTimestamp)
.setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_CREATE)
.build();
final var expectedConfigRuleCustomA =
ContextOuterClass.ConfigRule_Custom.newBuilder()
.setResourceKey("resourceKeyA")
.setResourceValue("resourceValueA")
.build();
final var expectedConfigRuleCustomB =
ContextOuterClass.ConfigRule_Custom.newBuilder()
.setResourceKey("resourceKeyB")
.setResourceValue("resourceValueB")
.build();
final var expectedConfigRuleA =
ContextOuterClass.ConfigRule.newBuilder()
.setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET)
.setCustom(expectedConfigRuleCustomA)
.build();
final var expectedConfigRuleB =
ContextOuterClass.ConfigRule.newBuilder()
.setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE)
.setCustom(expectedConfigRuleCustomB)
.build();
final var expectedDeviceConfig =
ContextOuterClass.DeviceConfig.newBuilder()
.addAllConfigRules(List.of(expectedConfigRuleA, expectedConfigRuleB))
.build();
final var expectedDeviceEvent =
ContextOuterClass.DeviceEvent.newBuilder()
.setDeviceId(expectedDeviceId)
.setEvent(expectedEvent)
.setDeviceConfig(expectedDeviceConfig)
.build();
final var creationEvent = new Event(1, EventTypeEnum.CREATE);
final var deviceEvent = new DeviceEvent("deviceId", creationEvent);
final var configRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA");
final var configRuleCustomB = new ConfigRuleCustom("resourceKeyB", "resourceValueB");
final var configRuleTypeA = new ConfigRuleTypeCustom(configRuleCustomA);
final var configRuleTypeB = new ConfigRuleTypeCustom(configRuleCustomB);
final var configRuleA = new ConfigRule(ConfigActionEnum.SET, configRuleTypeA);
final var configRuleB = new ConfigRule(ConfigActionEnum.DELETE, configRuleTypeB);
final var deviceConfig = new DeviceConfig(List.of(configRuleA, configRuleB));
final var deviceEvent = new DeviceEvent("deviceId", creationEvent, deviceConfig);
final var serializedDeviceEvent = serializer.serialize(deviceEvent);
assertThat(serializedDeviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent);
@@ -328,7 +365,22 @@ class SerializerTest {
final var expectedTimestamp = ContextOuterClass.Timestamp.newBuilder().setTimestamp(1).build();
final var creationEvent = new Event(1, expectedEventType);
final var expectedDeviceEvent = new DeviceEvent(dummyDeviceId, creationEvent);
final var expectedConfigRuleCustomA = new ConfigRuleCustom("resourceKeyA", "resourceValueA");
final var expectedConfigRuleCustomB = new ConfigRuleCustom("resourceKeyB", "resourceValueB");
final var expectedConfigRuleTypeA = new ConfigRuleTypeCustom(expectedConfigRuleCustomA);
final var expectedConfigRuleTypeB = new ConfigRuleTypeCustom(expectedConfigRuleCustomB);
final var expectedConfigRuleA = new ConfigRule(ConfigActionEnum.SET, expectedConfigRuleTypeA);
final var expectedConfigRuleB =
new ConfigRule(ConfigActionEnum.DELETE, expectedConfigRuleTypeB);
final var expectedDeviceConfig =
new DeviceConfig(List.of(expectedConfigRuleA, expectedConfigRuleB));
final var expectedDeviceEvent =
new DeviceEvent(dummyDeviceId, creationEvent, expectedDeviceConfig);
final var deviceUuid = Uuid.newBuilder().setUuid("deviceId");
final var deviceId = DeviceId.newBuilder().setDeviceUuid(deviceUuid).build();
@@ -337,8 +389,38 @@ class SerializerTest {
.setTimestamp(expectedTimestamp)
.setEventType(ContextOuterClass.EventTypeEnum.EVENTTYPE_REMOVE)
.build();
final var configRuleCustomA =
ContextOuterClass.ConfigRule_Custom.newBuilder()
.setResourceKey("resourceKeyA")
.setResourceValue("resourceValueA")
.build();
final var configRuleCustomB =
ContextOuterClass.ConfigRule_Custom.newBuilder()
.setResourceKey("resourceKeyB")
.setResourceValue("resourceValueB")
.build();
final var configRuleA =
ContextOuterClass.ConfigRule.newBuilder()
.setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_SET)
.setCustom(configRuleCustomA)
.build();
final var configRuleB =
ContextOuterClass.ConfigRule.newBuilder()
.setAction(ContextOuterClass.ConfigActionEnum.CONFIGACTION_DELETE)
.setCustom(configRuleCustomB)
.build();
final var deviceConfig =
ContextOuterClass.DeviceConfig.newBuilder()
.addAllConfigRules(List.of(configRuleA, configRuleB))
.build();
final var serializedDeviceEvent =
ContextOuterClass.DeviceEvent.newBuilder().setDeviceId(deviceId).setEvent(event).build();
ContextOuterClass.DeviceEvent.newBuilder()
.setDeviceId(deviceId)
.setEvent(event)
.setDeviceConfig(deviceConfig)
.build();
final var deviceEvent = serializer.deserialize(serializedDeviceEvent);
assertThat(deviceEvent).usingRecursiveComparison().isEqualTo(expectedDeviceEvent);
......
@@ -56,6 +56,8 @@ public interface ContextService extends MutinyService {
io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> setService(context.ContextOuterClass.Service request);
io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request);
io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request);
io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceIdList> listSliceIds(context.ContextOuterClass.ContextId request);
@@ -66,6 +68,8 @@ public interface ContextService extends MutinyService {
io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> setSlice(context.ContextOuterClass.Slice request);
io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request);
io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request);
io.smallrye.mutiny.Uni<context.ContextOuterClass.ConnectionIdList> listConnectionIds(context.ContextOuterClass.ServiceId request);
......
@@ -208,6 +208,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI
}
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) {
try {
return delegate.unsetService(request);
} catch (UnsupportedOperationException e) {
throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
}
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) {
try {
return delegate.removeService(request);
@@ -248,6 +256,14 @@ public class ContextServiceBean extends MutinyContextServiceGrpc.ContextServiceI
}
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) {
try {
return delegate.unsetSlice(request);
} catch (UnsupportedOperationException e) {
throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED);
}
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) {
try {
return delegate.removeSlice(request);
......
@@ -117,6 +117,10 @@ public class ContextServiceClient implements ContextService, MutinyClient<Mutiny
return stub.setService(request);
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.ServiceId> unsetService(context.ContextOuterClass.Service request) {
return stub.unsetService(request);
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeService(context.ContextOuterClass.ServiceId request) {
return stub.removeService(request);
}
@@ -137,6 +141,10 @@ public class ContextServiceClient implements ContextService, MutinyClient<Mutiny
return stub.setSlice(request);
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.SliceId> unsetSlice(context.ContextOuterClass.Slice request) {
return stub.unsetSlice(request);
}
@Override
public io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> removeSlice(context.ContextOuterClass.SliceId request) {
return stub.removeSlice(request);
}
......