diff --git a/.gitignore b/.gitignore index 5dc4372a5956ba5ce2b5ef6fc4359616c7cb5cd5..7e3b0cd6a26b755aeac4422f530c331d25a0cc43 100644 --- a/.gitignore +++ b/.gitignore @@ -162,6 +162,7 @@ cython_debug/ # TeraFlowSDN-generated files tfs_runtime_env_vars.sh +tfs_bchain_runtime_env_vars.sh delete_local_deployment.sh local_docker_deployment.sh local_k8s_deployment.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3de792462d28b2d42e71b0329aefce2c2928984e..dac76342a9fdb48247cc171cfdf37fd6b60600ba 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -42,3 +42,4 @@ include: #- local: '/src/slice/.gitlab-ci.yml' #- local: '/src/interdomain/.gitlab-ci.yml' - local: '/src/pathcomp/.gitlab-ci.yml' + #- local: '/src/dlt/.gitlab-ci.yml' diff --git a/deploy.sh b/deploy.sh index b0bd374422820be5b3cb88dbf096a1fd037bf14e..add41fa139a0127cb26d652f5b47decfe8658ad0 100755 --- a/deploy.sh +++ b/deploy.sh @@ -66,44 +66,71 @@ echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT for COMPONENT in $TFS_COMPONENTS; do echo "Processing '$COMPONENT' component..." - IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g') echo " Building Docker image..." BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then - docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" elif [ "$COMPONENT" == "pathcomp" ]; then BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" - docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG" + docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" - docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" # next command is redundant, but helpful to keep cache updated between rebuilds - docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" + docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" + docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" else - docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" fi if [ -n "$TFS_REGISTRY_IMAGE" ]; then echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." 
if [ "$COMPONENT" == "pathcomp" ]; then - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" - docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL-frontend" > "$TAG_LOG" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" - docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL-backend" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" - docker push "$IMAGE_URL-frontend" > "$PUSH_LOG" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" - docker push "$IMAGE_URL-backend" > "$PUSH_LOG" + docker push "$IMAGE_URL" > "$PUSH_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" + docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" + docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" - docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG" + docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" docker push "$IMAGE_URL" > "$PUSH_LOG" @@ -117,33 +144,48 @@ for COMPONENT in $TFS_COMPONENTS; do if [ -n "$TFS_REGISTRY_IMAGE" ]; then # Registry is set if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL-frontend#g" "$MANIFEST" + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL-backend#g" "$MANIFEST" - - sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" 
-f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" - sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" fi + + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" else # Registry is not set if [ "$COMPONENT" == "pathcomp" ]; then VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_NAME-frontend#g" "$MANIFEST" + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $COMPONENT-frontend:$TFS_IMAGE_TAG#g" "$MANIFEST" VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_NAME-backend#g" "$MANIFEST" + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $COMPONENT-backend:$TFS_IMAGE_TAG#g" "$MANIFEST" + elif [ "$COMPONENT" == "dlt" ]; then + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $COMPONENT-connector:$TFS_IMAGE_TAG#g" "$MANIFEST" - sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST" + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $COMPONENT-gateway:$TFS_IMAGE_TAG#g" "$MANIFEST" else VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) - sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST" - sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST" + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $COMPONENT:$TFS_IMAGE_TAG#g" "$MANIFEST" fi + + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST" fi # TODO: harmonize names of the monitoring component @@ -157,7 +199,7 @@ for COMPONENT in $TFS_COMPONENTS; do echo " Collecting env-vars for '$COMPONENT' component..." - SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json) + SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME}service --namespace $TFS_K8S_NAMESPACE -o json) if [ -z "${SERVICE_DATA}" ]; then continue; fi # Env vars for service's host address @@ -189,6 +231,7 @@ for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST printf "\n" done +printf "\n" # By now, leave these controls here. Some component dependencies are not well handled. @@ -203,8 +246,9 @@ fi for COMPONENT in $TFS_COMPONENTS; do echo "Waiting for '$COMPONENT' component..." 
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
     kubectl wait --namespace $TFS_K8S_NAMESPACE \
-        --for='condition=available' --timeout=300s deployment/${COMPONENT}service
+        --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
     printf "\n"
 done
diff --git a/deploy_mock_blockchain.sh b/deploy_mock_blockchain.sh
new file mode 100755
index 0000000000000000000000000000000000000000..066820fc0f9a1005823dd124798e4de122f206f8
--- /dev/null
+++ b/deploy_mock_blockchain.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# Set the URL of your local Docker registry where the images will be uploaded.
+REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the tag you want to use for your images.
+IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+K8S_NAMESPACE="tfs-bchain"
+
+COMPONENT="mock_blockchain"
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+TMP_FOLDER="./tmp"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+mkdir -p $TMP_LOGS_FOLDER
+
+echo "Deleting and creating a new namespace..."
+kubectl delete namespace $K8S_NAMESPACE
+kubectl create namespace $K8S_NAMESPACE
+printf "\n"
+
+echo "Deploying components and collecting environment variables..."
+ENV_VARS_SCRIPT=tfs_bchain_runtime_env_vars.sh
+echo "# Environment variables for TeraFlow Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
+PYTHONPATH=$(pwd)/src
+
+echo "Processing '$COMPONENT' component..."
+IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
+IMAGE_URL=$(echo "$REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+echo "  Building Docker image..."
+BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+docker build -t "$IMAGE_NAME" -f ./src/dlt/mock_blockchain/Dockerfile . > "$BUILD_LOG"
+
+if [ -n "$REGISTRY_IMAGE" ]; then
+    echo "  Pushing Docker image to '$REGISTRY_IMAGE'..."
+
+    TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+    docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
+
+    PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+    docker push "$IMAGE_URL" > "$PUSH_LOG"
+fi
+
+echo "  Adapting '$COMPONENT' manifest file..."
+MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}.yaml" +cp ./manifests/"${COMPONENT}".yaml "$MANIFEST" + +if [ -n "$REGISTRY_IMAGE" ]; then + # Registry is set + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST" + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST" +else + # Registry is not set + VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3) + sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST" + sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST" +fi + +echo " Deploying '$COMPONENT' component to Kubernetes..." +DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log" +kubectl --namespace $K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG" +COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/") +kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG" +kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG" + +echo " Collecting env-vars for '$COMPONENT' component..." +SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME} --namespace $K8S_NAMESPACE -o json) + +# Env vars for service's host address +SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP') +ENVVAR_HOST=$(echo "${COMPONENT}_SERVICE_HOST" | tr '[:lower:]' '[:upper:]') +echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT + +# Env vars for service's 'grpc' port +SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port') +ENVVAR_PORT_GRPC=$(echo "${COMPONENT}_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]') +echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT + +printf "\n" + +echo "Waiting for '$COMPONENT' component..." +kubectl wait --namespace $K8S_NAMESPACE \ + --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME} +printf "\n" + +echo "Deployment Resources:" +kubectl --namespace $K8S_NAMESPACE get all +printf "\n" + +echo "Done!" diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5ef6eae7de6cb7c839b0cb17e65c8b3f045c1d66 --- /dev/null +++ b/manifests/dltservice.yaml @@ -0,0 +1,86 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dltservice +spec: + selector: + matchLabels: + app: dltservice + template: + metadata: + labels: + app: dltservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: connector + image: registry.gitlab.com/teraflow-h2020/controller/dlt-connector:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:8080"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:8080"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi + - name: gateway + image: registry.gitlab.com/teraflow-h2020/controller/dlt-gateway:latest + imagePullPolicy: Always + #readinessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + #livenessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: dltservice +spec: + type: ClusterIP + selector: + app: dltservice + ports: + - name: grpc + protocol: TCP + port: 8080 + targetPort: 8080 diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b383d7db42be9eb3c9dc7758c230f5250eb43db1 --- /dev/null +++ b/manifests/mock_blockchain.yaml @@ -0,0 +1,64 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mock-blockchain +spec: + selector: + matchLabels: + app: mock-blockchain + template: + metadata: + labels: + app: mock-blockchain + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: server + image: registry.gitlab.com/teraflow-h2020/controller/mock_blockchain:latest + imagePullPolicy: Always + ports: + - containerPort: 50051 + env: + - name: LOG_LEVEL + value: "DEBUG" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50051"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50051"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: mock-blockchain +spec: + type: ClusterIP + selector: + app: mock-blockchain + ports: + - name: grpc + protocol: TCP + port: 50051 + targetPort: 50051 diff --git a/proto/dlt_gateway.proto b/proto/dlt_gateway.proto index b2c1297ccdd4c765862f4643b554d5373d8eccd3..84fe0fef6be366deb9286d49193ddb934c70a55c 100644 --- a/proto/dlt_gateway.proto +++ b/proto/dlt_gateway.proto @@ -21,8 +21,8 @@ service DltGatewayService { rpc RecordToDlt (DltRecord ) returns ( DltRecordStatus ) {} rpc GetFromDlt (DltRecordId ) returns ( DltRecord ) {} rpc SubscribeToDlt(DltRecordSubscription ) returns (stream DltRecordEvent ) {} - rpc GetDltStatus (context.TeraFlowController) returns ( DltPeerStatus ) {} // NEC is checkig if it is possible - rpc GetDltPeers (context.Empty ) returns ( DltPeerStatusList) {} // NEC is checkig if it is possible + rpc GetDltStatus (context.TeraFlowController) returns ( DltPeerStatus ) {} // NEC is checking if it is possible + rpc GetDltPeers (context.Empty ) returns ( DltPeerStatusList) {} // NEC is checking if it is possible } enum DltRecordTypeEnum { diff --git a/src/common/Constants.py b/src/common/Constants.py index f18d4384035f2310355d7a16c5a709720b5b07e9..a536ef60047eb1f210f8d98d207134d377adcbed 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -49,6 +49,9 @@ class ServiceNameEnum(Enum): PATHCOMP = 'pathcomp' WEBUI = 'webui' + # Used for test and debugging only + DLT_GATEWAY = 'dlt-gateway' + # Default gRPC service ports DEFAULT_SERVICE_GRPC_PORTS = { ServiceNameEnum.CONTEXT .value : 1010, @@ -63,6 +66,9 @@ DEFAULT_SERVICE_GRPC_PORTS = { ServiceNameEnum.CYBERSECURITY.value : 10000, ServiceNameEnum.INTERDOMAIN .value : 10010, ServiceNameEnum.PATHCOMP .value : 10020, + + # Used for test and debugging only + ServiceNameEnum.DLT_GATEWAY .value : 50051, } # Default HTTP/REST-API service ports diff --git a/src/common/tests/MockMessageBroker.py b/src/common/tests/MockMessageBroker.py new file mode 100644 index 0000000000000000000000000000000000000000..851c06766fd705bee746840f3d4ce9c4f4ac404d --- /dev/null +++ b/src/common/tests/MockMessageBroker.py @@ -0,0 +1,61 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import json, logging, threading, time
+from queue import Queue, Empty
+from typing import Dict, Iterator, NamedTuple, Set
+
+LOGGER = logging.getLogger(__name__)
+CONSUME_TIMEOUT = 0.1 # seconds
+
+class Message(NamedTuple):
+    topic: str
+    content: str
+
+class MockMessageBroker:
+    def __init__(self):
+        self._terminate = threading.Event()
+        self._topic__to__queues : Dict[str, Set[Queue]] = {}
+
+    def publish(self, message : Message) -> None:
+        queues = self._topic__to__queues.get(message.topic, None)
+        if queues is None: return
+        for queue in queues: queue.put_nowait((message.topic, message.content))
+
+    def consume(
+        self, topic_names : Set[str], block : bool = True, consume_timeout : float = CONSUME_TIMEOUT
+    ) -> Iterator[Message]:
+        queue = Queue()
+        for topic_name in topic_names:
+            self._topic__to__queues.setdefault(topic_name, set()).add(queue)
+
+        while not self._terminate.is_set():
+            try:
+                message = queue.get(block=block, timeout=consume_timeout)
+            except Empty:
+                continue
+            if message is None: continue
+            yield Message(*message)
+
+        for topic_name in topic_names:
+            self._topic__to__queues.get(topic_name, set()).discard(queue)
+
+    def terminate(self):
+        self._terminate.set()
+
+def notify_event(messagebroker, topic_name, event_type, fields) -> None:
+    event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}}
+    for field_name, field_value in fields.items():
+        event[field_name] = field_value
+    messagebroker.publish(Message(topic_name, json.dumps(event)))
diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py
index 9f80fdbcab0419072a4299f908a7b637038c2a1b..c56ed382adad4b2daa2e3d61575d2973f02bfbe2 100644
--- a/src/common/tests/MockServicerImpl_Context.py
+++ b/src/common/tests/MockServicerImpl_Context.py
@@ -12,22 +12,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
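As a quick sanity check of the MockMessageBroker introduced above, a minimal usage sketch (hypothetical test code, not part of this patch; the topic name and field values are illustrative):

    import threading, time
    from common.proto.context_pb2 import EventTypeEnum
    from common.tests.MockMessageBroker import MockMessageBroker, notify_event

    broker = MockMessageBroker()

    def producer():
        time.sleep(0.5)  # let the consumer subscribe first; messages published before subscription are dropped
        notify_event(broker, 'device', EventTypeEnum.EVENTTYPE_CREATE, {'device_id': {'device_uuid': {'uuid': 'dev-1'}}})
        time.sleep(0.5)
        broker.terminate()  # makes the consume() generator below stop iterating

    threading.Thread(target=producer).start()
    for message in broker.consume({'device'}):  # yields Message(topic, content) tuples
        print(message.topic, message.content)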
-import grpc, logging
+import grpc, json, logging
 from typing import Any, Dict, Iterator, List
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
     Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
-    Empty,
+    Empty, EventTypeEnum,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
     Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
     Slice, SliceEvent, SliceId, SliceIdList, SliceList,
     Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.context_pb2_grpc import ContextServiceServicer
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tests.MockMessageBroker import MockMessageBroker, notify_event
+from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
 
 LOGGER = logging.getLogger(__name__)
 
+TOPIC_CONNECTION = 'connection'
+TOPIC_CONTEXT = 'context'
+TOPIC_TOPOLOGY = 'topology'
+TOPIC_DEVICE = 'device'
+TOPIC_LINK = 'link'
+TOPIC_SERVICE = 'service'
+TOPIC_SLICE = 'slice'
+
 def get_container(database : Dict[str, Dict[str, Any]], container_name : str) -> Dict[str, Any]:
     return database.setdefault(container_name, {})
 
@@ -35,10 +44,15 @@ def get_entries(database : Dict[str, Dict[str, Any]], container_name : str) -> L
     container = get_container(database, container_name)
     return [container[entry_uuid] for entry_uuid in sorted(container.keys())]
 
+def has_entry(database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str) -> Any:
+    LOGGER.debug('[has_entry] BEFORE database={:s}'.format(str(database)))
+    container = get_container(database, container_name)
+    return entry_uuid in container
+
 def get_entry(
     context : grpc.ServicerContext, database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str
 ) -> Any:
-    LOGGER.debug('[get_entry] AFTER database={:s}'.format(str(database)))
+    LOGGER.debug('[get_entry] BEFORE database={:s}'.format(str(database)))
     container = get_container(database, container_name)
     if entry_uuid not in container:
         context.abort(grpc.StatusCode.NOT_FOUND, str('{:s}({:s}) not found'.format(container_name, entry_uuid)))
@@ -64,8 +78,27 @@ class MockServicerImpl_Context(ContextServiceServicer):
     def __init__(self):
         LOGGER.info('[__init__] Creating Servicer...')
         self.database : Dict[str, Any] = {}
+        self.msg_broker = MockMessageBroker()
         LOGGER.info('[__init__] Servicer Created')
 
+    # ----- Common -----------------------------------------------------------------------------------------------------
+
+    def _set(self, request, container_name, entry_uuid, entry_id_field_name, topic_name):
+        exists = has_entry(self.database, container_name, entry_uuid)
+        entry = set_entry(self.database, container_name, entry_uuid, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if exists else EventTypeEnum.EVENTTYPE_CREATE
+        entry_id = getattr(entry, entry_id_field_name)
+        dict_entry_id = grpc_message_to_json(entry_id)
+        notify_event(self.msg_broker, topic_name, event_type, {entry_id_field_name: dict_entry_id})
+        return entry_id
+
+    def _del(self, request, container_name, entry_uuid, entry_id_field_name, topic_name, grpc_context):
+        empty = del_entry(grpc_context, self.database, container_name, entry_uuid)
+        event_type = EventTypeEnum.EVENTTYPE_REMOVE
+        dict_entry_id = grpc_message_to_json(request)
+        notify_event(self.msg_broker, topic_name, event_type, {entry_id_field_name: dict_entry_id})
+        return empty
+
     # ----- Context ----------------------------------------------------------------------------------------------------
 
     def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList:
@@ -82,14 +115,15 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId:
         LOGGER.info('[SetContext] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(self.database, 'context', request.context_id.context_uuid.uuid, request).context_id
+        return self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT)
 
     def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveContext] request={:s}'.format(grpc_message_to_json_string(request)))
-        return del_entry(context, self.database, 'context', request.context_uuid.uuid)
+        return self._del(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT, context)
 
     def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
         LOGGER.info('[GetContextEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_CONTEXT}): yield ContextEvent(**json.loads(message.content))
 
 
     # ----- Topology ---------------------------------------------------------------------------------------------------
@@ -112,15 +146,18 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
         LOGGER.info('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'topology[{:s}]'.format(str(request.topology_id.context_id.context_uuid.uuid))
-        return set_entry(self.database, container_name, request.topology_id.topology_uuid.uuid, request).topology_id
+        topology_uuid = request.topology_id.topology_uuid.uuid
+        return self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY)
 
     def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveTopology] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid))
-        return del_entry(context, self.database, container_name, request.topology_uuid.uuid)
+        topology_uuid = request.topology_uuid.uuid
+        return self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context)
 
     def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
         LOGGER.info('[GetTopologyEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_TOPOLOGY}): yield TopologyEvent(**json.loads(message.content))
 
 
     # ----- Device -----------------------------------------------------------------------------------------------------
@@ -139,14 +176,15 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetDevice(self, request: Context, context : grpc.ServicerContext) -> DeviceId:
         LOGGER.info('[SetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(self.database, 'device', request.device_id.device_uuid.uuid, request).device_id
+        return self._set(request, 'device', request.device_id.device_uuid.uuid, 'device_id', TOPIC_DEVICE)
 
     def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveDevice] request={:s}'.format(grpc_message_to_json_string(request)))
-        return del_entry(context, self.database, 'device', request.device_uuid.uuid)
+        return self._del(request, 'device', request.device_uuid.uuid, 'device_id', TOPIC_DEVICE, context)
 
     def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
         LOGGER.info('[GetDeviceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_DEVICE}): yield DeviceEvent(**json.loads(message.content))
 
 
     # ----- Link -------------------------------------------------------------------------------------------------------
@@ -165,14 +203,15 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetLink(self, request: Context, context : grpc.ServicerContext) -> LinkId:
         LOGGER.info('[SetLink] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(self.database, 'link', request.link_id.link_uuid.uuid, request).link_id
+        return self._set(request, 'link', request.link_id.link_uuid.uuid, 'link_id', TOPIC_LINK)
 
     def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveLink] request={:s}'.format(grpc_message_to_json_string(request)))
-        return del_entry(context, self.database, 'link', request.link_uuid.uuid)
+        return self._del(request, 'link', request.link_uuid.uuid, 'link_id', TOPIC_LINK, context)
 
     def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
         LOGGER.info('[GetLinkEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_LINK}): yield LinkEvent(**json.loads(message.content))
 
 
     # ----- Slice ------------------------------------------------------------------------------------------------------
@@ -226,17 +265,19 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
         LOGGER.info('[SetService] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(
-            self.database, 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid)),
-            request.service_id.service_uuid.uuid, request).service_id
+        container_name = 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid))
+        service_uuid = request.service_id.service_uuid.uuid
+        return self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE)
 
     def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid))
-        return del_entry(context, self.database, container_name, request.service_uuid.uuid)
+        service_uuid = request.service_uuid.uuid
+        return self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context)
 
     def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
         LOGGER.info('[GetServiceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_SERVICE}): yield ServiceEvent(**json.loads(message.content))
 
 
     # ----- Connection -------------------------------------------------------------------------------------------------
@@ -259,21 +300,21 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetConnection(self, request: Connection, context : grpc.ServicerContext) ->
ConnectionId: LOGGER.info('[SetConnection] request={:s}'.format(grpc_message_to_json_string(request))) - service_connection__container_name = 'service_connection[{:s}/{:s}]'.format( + container_name = 'service_connection[{:s}/{:s}]'.format( str(request.service_id.context_id.context_uuid.uuid), str(request.service_id.service_uuid.uuid)) - set_entry( - self.database, service_connection__container_name, request.connection_id.connection_uuid.uuid, request) - return set_entry( - self.database, 'connection', request.connection_id.connection_uuid.uuid, request).connection_id + connection_uuid = request.connection_id.connection_uuid.uuid + set_entry(self.database, container_name, connection_uuid, request) + return self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION) def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveConnection] request={:s}'.format(grpc_message_to_json_string(request))) connection = get_entry(context, self.database, 'connection', request.connection_uuid.uuid) - service_id = connection.service_id - service_connection__container_name = 'service_connection[{:s}/{:s}]'.format( - str(service_id.context_id.context_uuid.uuid), str(service_id.service_uuid.uuid)) - del_entry(context, self.database, service_connection__container_name, request.connection_uuid.uuid) - return del_entry(context, self.database, 'connection', request.connection_uuid.uuid) + container_name = 'service_connection[{:s}/{:s}]'.format( + str(connection.service_id.context_id.context_uuid.uuid), str(connection.service_id.service_uuid.uuid)) + connection_uuid = request.connection_uuid.uuid + del_entry(context, self.database, container_name, connection_uuid) + return self._del(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION, context) def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: LOGGER.info('[GetConnectionEvents] request={:s}'.format(grpc_message_to_json_string(request))) + for message in self.msg_broker.consume({TOPIC_CONNECTION}): yield ConnectionEvent(**json.loads(message.content)) diff --git a/src/common/tests/MockServicerImpl_DltGateway.py b/src/common/tests/MockServicerImpl_DltGateway.py new file mode 100644 index 0000000000000000000000000000000000000000..2d750168238b2a041badd1974f27e57f62363d90 --- /dev/null +++ b/src/common/tests/MockServicerImpl_DltGateway.py @@ -0,0 +1,165 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
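Before the DLT gateway mock itself, a short sketch of how the event plumbing just added to MockServicerImpl_Context can be exercised in-process (hypothetical test code, not part of this patch; passing context=None instead of a real grpc.ServicerContext is an illustration-only shortcut that works here because these methods never touch it):

    import threading, time
    from common.proto.context_pb2 import Context, Empty
    from common.tests.MockServicerImpl_Context import MockServicerImpl_Context

    servicer = MockServicerImpl_Context()

    def watch():
        # GetContextEvents rebuilds ContextEvent messages from the JSON payloads published by _set/_del
        for event in servicer.GetContextEvents(Empty(), None):
            print('received:', event)
            break

    watcher = threading.Thread(target=watch, daemon=True)
    watcher.start()
    time.sleep(0.5)  # give the watcher time to subscribe before publishing

    request = Context()
    request.context_id.context_uuid.uuid = 'admin'
    servicer.SetContext(request, None)  # stores the entry and emits an EVENTTYPE_CREATE on the 'context' topic
    watcher.join(timeout=5.0)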
+ +import grpc, itertools, json, logging, time +from typing import Any, Dict, Iterator, Optional, Tuple +from common.tests.MockMessageBroker import Message, MockMessageBroker +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.proto.context_pb2 import EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Empty, TeraFlowController +from common.proto.dlt_gateway_pb2 import ( + DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_DELETE, DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_UPDATE, + DLTRECORDSTATUS_FAILED, DLTRECORDSTATUS_SUCCEEDED, DLTRECORDTYPE_CONTEXT, DLTRECORDTYPE_DEVICE, DLTRECORDTYPE_LINK, + DLTRECORDTYPE_SERVICE, DLTRECORDTYPE_SLICE, DLTRECORDTYPE_TOPOLOGY, DLTRECORDTYPE_UNDEFINED, + DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordOperationEnum, DltRecordStatus, + DltRecordSubscription, DltRecordTypeEnum) +from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceServicer + +LOGGER = logging.getLogger(__name__) + +DltRecordKey = Tuple[str, Any, str] # domain_uuid, DltRecordOperationEnum, record_uuid +DltRecordDict = Dict[DltRecordKey, DltRecord] # dlt_record_key => dlt_record + +class AlreadyExistsException(Exception): + pass + +class DoesNotExistException(Exception): + pass + +class MockServicerImpl_DltGateway(DltGatewayServiceServicer): + def __init__(self): + LOGGER.info('[__init__] Creating Servicer...') + self.records : DltRecordDict = {} + self.msg_broker = MockMessageBroker() + LOGGER.info('[__init__] Servicer Created') + + def __get_record(self, record_id : DltRecordId, should_exist : bool) -> Optional[Dict]: + domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid + str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '') + records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {}) + records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {}) + record : Optional[Dict] = records_type.get(record_uuid) + if should_exist and record is None: + raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) + elif not should_exist and record is not None: + raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) + return record + + def __set_record(self, record_id : DltRecordId, should_exist : bool, data_json : str) -> None: + domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid + str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '') + records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {}) + records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {}) + record : Optional[Dict] = records_type.get(record_uuid) + if should_exist and record is None: + raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) + elif not should_exist and record is not None: + raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) + records_type[record_uuid] = json.loads(data_json) + + def __del_record(self, record_id : DltRecordId) -> None: + domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid + str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '') + records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {}) + records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {}) + record : Optional[Dict] 
= records_type.get(record_uuid)
+        if record is None:
+            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+        records_type.pop(record_uuid, None)  # plain dicts have no discard(); pop() removes the record safely
+
+    def __publish(self, operation : DltRecordOperationEnum, record_id : DltRecordId) -> None:
+        str_operation = DltRecordOperationEnum.Name(operation).upper().replace('DLTRECORDOPERATION_', '')
+        str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '')
+        topic = '{:s}:{:s}'.format(str_type, str_operation)
+        event = DltRecordEvent()
+        event.event.timestamp.timestamp = time.time() # pylint: disable=no-member
+        event.event.event_type = { # pylint: disable=no-member
+            DLTRECORDOPERATION_ADD : EVENTTYPE_CREATE,
+            DLTRECORDOPERATION_UPDATE: EVENTTYPE_UPDATE,
+            DLTRECORDOPERATION_DELETE: EVENTTYPE_REMOVE,
+        }.get(operation)
+        event.record_id.CopyFrom(record_id) # pylint: disable=no-member
+        self.msg_broker.publish(Message(topic=topic, content=grpc_message_to_json_string(event)))
+
+    def RecordToDlt(self, request : DltRecord, context : grpc.ServicerContext) -> DltRecordStatus:
+        LOGGER.info('[RecordToDlt] request={:s}'.format(grpc_message_to_json_string(request)))
+        record_id = request.record_id
+        response = DltRecordStatus()
+        response.record_id.CopyFrom(record_id) # pylint: disable=no-member
+        try:
+            operation : DltRecordOperationEnum = request.operation
+            if operation == DLTRECORDOPERATION_ADD:
+                self.__set_record(record_id, False, request.data_json)
+            elif operation == DLTRECORDOPERATION_UPDATE:
+                self.__set_record(record_id, True, request.data_json)
+            elif operation == DLTRECORDOPERATION_DELETE:
+                self.__del_record(record_id)
+            else:
+                str_operation = DltRecordOperationEnum.Name(operation).upper().replace('DLTRECORDOPERATION_', '')
+                raise NotImplementedError('DltRecordOperationEnum({:s})'.format(str_operation))
+            self.__publish(operation, record_id)
+            response.status = DLTRECORDSTATUS_SUCCEEDED
+        except Exception as e: # pylint: disable=broad-except
+            response.status = DLTRECORDSTATUS_FAILED
+            response.error_message = str(e)
+        LOGGER.info('[RecordToDlt] response={:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    def GetFromDlt(self, request : DltRecordId, context : grpc.ServicerContext) -> DltRecord:
+        LOGGER.info('[GetFromDlt] request={:s}'.format(grpc_message_to_json_string(request)))
+        record = self.__get_record(request, True)
+        response = DltRecord()
+        response.record_id.CopyFrom(request) # pylint: disable=no-member
+        response.operation = DLTRECORDOPERATION_UNDEFINED
+        response.data_json = json.dumps(record, sort_keys=True)
+        LOGGER.info('[GetFromDlt] response={:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    def SubscribeToDlt(
+        self, request: DltRecordSubscription, context : grpc.ServicerContext
+    ) -> Iterator[DltRecordEvent]:
+        LOGGER.info('[SubscribeToDlt] request={:s}'.format(grpc_message_to_json_string(request)))
+        types = request.type
+        if len(types) == 0:
+            types = [
+                DLTRECORDTYPE_UNDEFINED, DLTRECORDTYPE_CONTEXT, DLTRECORDTYPE_TOPOLOGY, DLTRECORDTYPE_DEVICE,
+                DLTRECORDTYPE_LINK, DLTRECORDTYPE_SERVICE, DLTRECORDTYPE_SLICE
+            ]
+        str_types = [
+            DltRecordTypeEnum.Name(_type).upper().replace('DLTRECORDTYPE_', '')
+            for _type in types
+        ]
+        operations = request.operation
+        if len(operations) == 0:
+            operations = [
+                DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UPDATE,
+                DLTRECORDOPERATION_DELETE
+            ]
+        str_operations = [
DltRecordOperationEnum.Name(_operation).upper().replace('DLTRECORDOPERATION_', '') + for _operation in operations + ] + topics = { + '{:s}:{:s}'.format(*type_operation) + for type_operation in itertools.product(str_types, str_operations) + } + for message in self.msg_broker.consume(topics): + yield DltRecordEvent(**json.loads(message.content)) + + def GetDltStatus(self, request : TeraFlowController, context : grpc.ServicerContext) -> DltPeerStatus: + LOGGER.info('[GetDltStatus] request={:s}'.format(grpc_message_to_json_string(request))) + raise NotImplementedError() + + def GetDltPeers(self, request : Empty, context : grpc.ServicerContext) -> DltPeerStatusList: + LOGGER.info('[GetDltPeers] request={:s}'.format(grpc_message_to_json_string(request))) + raise NotImplementedError() diff --git a/src/common/tools/object_factory/Link.py b/src/common/tools/object_factory/Link.py index 922a39dbe24f4f4b635f378180ab13c80322801b..12c233464f575fefdaa13afe457ca1ae500f15b6 100644 --- a/src/common/tools/object_factory/Link.py +++ b/src/common/tools/object_factory/Link.py @@ -13,15 +13,21 @@ # limitations under the License. import copy -from typing import Dict, List +from typing import Dict, List, Tuple def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: return '{:s}/{:s}=={:s}/{:s}'.format( a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], z_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) -def json_link_id(link_uuid : str): +def json_link_id(link_uuid : str) -> Dict: return {'link_uuid': {'uuid': link_uuid}} -def json_link(link_uuid : str, endpoint_ids : List[Dict]): +def json_link(link_uuid : str, endpoint_ids : List[Dict]) -> Dict: return {'link_id': json_link_id(link_uuid), 'link_endpoint_ids': copy.deepcopy(endpoint_ids)} + +def compose_link(endpoint_a, endpoint_z) -> Tuple[Dict, Dict]: + link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id']) + link_id = json_link_id(link_uuid) + link = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]) + return link_id, link diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py index 14a297231f757771beb2c01bc557e5e3de0defb0..9715098bd3cd979d78a83b4839e40613d3997d1e 100644 --- a/src/context/client/EventsCollector.py +++ b/src/context/client/EventsCollector.py @@ -22,26 +22,57 @@ LOGGER.setLevel(logging.DEBUG) class EventsCollector: def __init__( - self, context_client_grpc : ContextClient, log_events_received=False + self, context_client : ContextClient, + log_events_received : bool = False, + activate_context_collector : bool = True, + activate_topology_collector : bool = True, + activate_device_collector : bool = True, + activate_link_collector : bool = True, + activate_service_collector : bool = True, + activate_slice_collector : bool = True, + activate_connection_collector : bool = True, + ) -> None: self._events_queue = queue.Queue() self._log_events_received = log_events_received - self._context_stream = context_client_grpc.GetContextEvents(Empty()) - self._topology_stream = context_client_grpc.GetTopologyEvents(Empty()) - self._device_stream = context_client_grpc.GetDeviceEvents(Empty()) - self._link_stream = context_client_grpc.GetLinkEvents(Empty()) - self._service_stream = context_client_grpc.GetServiceEvents(Empty()) - self._slice_stream = context_client_grpc.GetSliceEvents(Empty()) - self._connection_stream = context_client_grpc.GetConnectionEvents(Empty()) - - 
self._context_thread = threading.Thread(target=self._collect, args=(self._context_stream ,), daemon=False) - self._topology_thread = threading.Thread(target=self._collect, args=(self._topology_stream ,), daemon=False) - self._device_thread = threading.Thread(target=self._collect, args=(self._device_stream ,), daemon=False) - self._link_thread = threading.Thread(target=self._collect, args=(self._link_stream ,), daemon=False) - self._service_thread = threading.Thread(target=self._collect, args=(self._service_stream ,), daemon=False) - self._slice_thread = threading.Thread(target=self._collect, args=(self._slice_stream ,), daemon=False) - self._connection_thread = threading.Thread(target=self._collect, args=(self._connection_stream,), daemon=False) + self._context_stream, self._context_thread = None, None + if activate_context_collector: + self._context_stream = context_client.GetContextEvents(Empty()) + self._context_thread = self._create_collector_thread(self._context_stream) + + self._topology_stream, self._topology_thread = None, None + if activate_topology_collector: + self._topology_stream = context_client.GetTopologyEvents(Empty()) + self._topology_thread = self._create_collector_thread(self._topology_stream) + + self._device_stream, self._device_thread = None, None + if activate_device_collector: + self._device_stream = context_client.GetDeviceEvents(Empty()) + self._device_thread = self._create_collector_thread(self._device_stream) + + self._link_stream, self._link_thread = None, None + if activate_link_collector: + self._link_stream = context_client.GetLinkEvents(Empty()) + self._link_thread = self._create_collector_thread(self._link_stream) + + self._service_stream, self._service_thread = None, None + if activate_service_collector: + self._service_stream = context_client.GetServiceEvents(Empty()) + self._service_thread = self._create_collector_thread(self._service_stream) + + self._slice_stream, self._slice_thread = None, None + if activate_slice_collector: + self._slice_stream = context_client.GetSliceEvents(Empty()) + self._slice_thread = self._create_collector_thread(self._slice_stream) + + self._connection_stream, self._connection_thread = None, None + if activate_connection_collector: + self._connection_stream = context_client.GetConnectionEvents(Empty()) + self._connection_thread = self._create_collector_thread(self._connection_stream) + + def _create_collector_thread(self, stream, as_daemon : bool = False): + return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon) def _collect(self, events_stream) -> None: try: @@ -54,13 +85,13 @@ class EventsCollector: raise # pragma: no cover def start(self): - self._context_thread.start() - self._topology_thread.start() - self._device_thread.start() - self._link_thread.start() - self._service_thread.start() - self._slice_thread.start() - self._connection_thread.start() + if self._context_thread is not None: self._context_thread.start() + if self._topology_thread is not None: self._topology_thread.start() + if self._device_thread is not None: self._device_thread.start() + if self._link_thread is not None: self._link_thread.start() + if self._service_thread is not None: self._service_thread.start() + if self._slice_thread is not None: self._slice_thread.start() + if self._connection_thread is not None: self._connection_thread.start() def get_event(self, block : bool = True, timeout : float = 0.1): try: @@ -83,18 +114,18 @@ class EventsCollector: return sorted(events, key=lambda e: e.event.timestamp.timestamp) def 
stop(self):
-        self._context_stream.cancel()
-        self._topology_stream.cancel()
-        self._device_stream.cancel()
-        self._link_stream.cancel()
-        self._service_stream.cancel()
-        self._slice_stream.cancel()
-        self._connection_stream.cancel()
-
-        self._context_thread.join()
-        self._topology_thread.join()
-        self._device_thread.join()
-        self._link_thread.join()
-        self._service_thread.join()
-        self._slice_thread.join()
-        self._connection_thread.join()
+        if self._context_stream is not None: self._context_stream.cancel()
+        if self._topology_stream is not None: self._topology_stream.cancel()
+        if self._device_stream is not None: self._device_stream.cancel()
+        if self._link_stream is not None: self._link_stream.cancel()
+        if self._service_stream is not None: self._service_stream.cancel()
+        if self._slice_stream is not None: self._slice_stream.cancel()
+        if self._connection_stream is not None: self._connection_stream.cancel()
+
+        if self._context_thread is not None: self._context_thread.join()
+        if self._topology_thread is not None: self._topology_thread.join()
+        if self._device_thread is not None: self._device_thread.join()
+        if self._link_thread is not None: self._link_thread.join()
+        if self._service_thread is not None: self._service_thread.join()
+        if self._slice_thread is not None: self._slice_thread.join()
+        if self._connection_thread is not None: self._connection_thread.join()
diff --git a/src/dlt/.gitlab-ci.yml b/src/dlt/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3c2013f50904eb9cd366bf3e3b3cfce6d10c6fd6
--- /dev/null
+++ b/src/dlt/.gitlab-ci.yml
@@ -0,0 +1,184 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker images to the GitLab Docker registry
+build dlt:
+  variables:
+    IMAGE_NAME: 'dlt' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    # These builds tag the resulting images to prevent them from being removed by the dangling-image cleanup command below
+    - docker build -t "${IMAGE_NAME}-gateway:$IMAGE_TAG" -f ./src/$IMAGE_NAME/gateway/Dockerfile .
+    - docker build -t "${IMAGE_NAME}-connector:$IMAGE_TAG" -f ./src/$IMAGE_NAME/connector/Dockerfile .
+    - docker tag "${IMAGE_NAME}-gateway:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
+    - docker tag "${IMAGE_NAME}-connector:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/.gitlab-ci.yml
+      - src/$IMAGE_NAME/gateway/**/*.{kt,kts,proto,pem,json}
+      - src/$IMAGE_NAME/gateway/build.gradle.kts
+      - src/$IMAGE_NAME/gateway/Dockerfile
+      - src/$IMAGE_NAME/gateway/gradle.properties
+      - src/$IMAGE_NAME/gateway/gradlew
+      - src/$IMAGE_NAME/gateway/gradlew.bat
+      - src/$IMAGE_NAME/gateway/settings.gradle.kts
+      - src/$IMAGE_NAME/connector/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/connector/Dockerfile
+      - src/$IMAGE_NAME/connector/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit test dlt-gateway:
+  variables:
+    IMAGE_NAME: 'dlt' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build dlt
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep ${IMAGE_NAME}-connector; then docker rm -f ${IMAGE_NAME}-connector; else echo "${IMAGE_NAME}-connector image is not in the system"; fi
+    - if docker container ls | grep ${IMAGE_NAME}-gateway; then docker rm -f ${IMAGE_NAME}-gateway; else echo "${IMAGE_NAME}-gateway image is not in the system"; fi
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
+    #- docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 -v "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results" --network=teraflowbridge ${IMAGE_NAME}-gateway:${IMAGE_TAG}
+    - docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 --network=teraflowbridge $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs ${IMAGE_NAME}-gateway
+    #- docker exec -i ${IMAGE_NAME}-gateway bash -c "curl -0 -v -X POST -H 'Expect:' -H 'Content-Type:\ application/json' http://127.0.0.1:8081/dlt/api/v1/compRoute -d @/var/teraflow/tests/pc-req.json"
+    #- docker kill --signal=SIGUSR1 dlt-gateway
+    #- docker exec -i ${IMAGE_NAME}-gateway bash -c "gcovr"
+  #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker logs ${IMAGE_NAME}-gateway
+    - docker rm -f ${IMAGE_NAME}-gateway
+    - docker network rm teraflowbridge
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/.gitlab-ci.yml
+      - src/$IMAGE_NAME/gateway/**/*.{kt,kts,proto,pem,json}
+      - src/$IMAGE_NAME/gateway/build.gradle.kts
+      - src/$IMAGE_NAME/gateway/Dockerfile
      -
src/$IMAGE_NAME/gateway/gradle.properties + - src/$IMAGE_NAME/gateway/gradlew + - src/$IMAGE_NAME/gateway/gradlew.bat + - src/$IMAGE_NAME/gateway/settings.gradle.kts + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + #artifacts: + # when: always + # reports: + # junit: src/$IMAGE_NAME/gateway/tests/${IMAGE_NAME}-gateway_report.xml + +# Apply unit test to the component +unit test dlt-connector: + variables: + IMAGE_NAME: 'dlt' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: unit_test + needs: + - build dlt + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi + - if docker container ls | grep ${IMAGE_NAME}-connector; then docker rm -f ${IMAGE_NAME}-connector; else echo "${IMAGE_NAME}-connector container is not in the system"; fi + - if docker container ls | grep ${IMAGE_NAME}-gateway; then docker rm -f ${IMAGE_NAME}-gateway; else echo "${IMAGE_NAME}-gateway container is not in the system"; fi + script: + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG" + - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG" + - docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 -v "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG + - sleep 1 + - docker run --name ${IMAGE_NAME}-connector -d -p 8080:8080 --env "DLT_GATEWAY_HOST=172.28.0.1" --env "DLT_GATEWAY_PORT=50051" -v "$PWD/src/${IMAGE_NAME}/connector/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG + - sleep 5 + - docker ps -a + - docker logs ${IMAGE_NAME}-connector + - docker logs ${IMAGE_NAME}-gateway + - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/connector/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-connector_report.xml" + - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" + coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + after_script: + - docker ps -a + - docker logs ${IMAGE_NAME}-connector + - docker logs ${IMAGE_NAME}-gateway + - docker rm -f ${IMAGE_NAME}-connector + - docker rm -f ${IMAGE_NAME}-gateway + - docker network rm teraflowbridge + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/$IMAGE_NAME/.gitlab-ci.yml + - src/$IMAGE_NAME/gateway/**/*.{kt,kts,proto,pem,json} + - src/$IMAGE_NAME/gateway/build.gradle.kts + - src/$IMAGE_NAME/gateway/Dockerfile + - src/$IMAGE_NAME/gateway/gradle.properties + - src/$IMAGE_NAME/gateway/gradlew + - src/$IMAGE_NAME/gateway/gradlew.bat + - src/$IMAGE_NAME/gateway/settings.gradle.kts + - src/$IMAGE_NAME/connector/**/*.{py,in,yml} + - src/$IMAGE_NAME/connector/Dockerfile + - src/$IMAGE_NAME/connector/tests/*.py + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml + artifacts: + when: always + reports: + junit: src/$IMAGE_NAME/connector/tests/${IMAGE_NAME}-connector_report.xml + +# Deployment of
the service in Kubernetes Cluster +deploy dlt: + variables: + IMAGE_NAME: 'dlt' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: deploy + needs: + - unit test dlt-gateway + - unit test dlt-connector + # - integ_test execute + script: + - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml' + - kubectl version + - kubectl get all + - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml" + - kubectl get all + # environment: + # name: test + # url: https://example.com + # kubernetes: + # namespace: test + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + when: manual + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + when: manual diff --git a/src/dlt/connector/Config.py b/src/dlt/connector/Config.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/dlt/connector/Config.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/dlt/connector/Dockerfile b/src/dlt/connector/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..51e9ec506f0c8a6c35ceac68833e3ad683ef8e63 --- /dev/null +++ b/src/dlt/connector/Dockerfile @@ -0,0 +1,69 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/dlt/connector +WORKDIR /var/teraflow/dlt/connector +COPY src/dlt/connector/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/context/. context/ +COPY src/dlt/connector/. dlt/connector + +# Start the service +ENTRYPOINT ["python", "-m", "dlt.connector.service"] diff --git a/src/dlt/connector/client/DltConnectorClient.py b/src/dlt/connector/client/DltConnectorClient.py new file mode 100644 index 0000000000000000000000000000000000000000..f48562996b067ca81a99b6ceb7288029be7ba1c8 --- /dev/null +++ b/src/dlt/connector/client/DltConnectorClient.py @@ -0,0 +1,95 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
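The proto-compilation step in the connector Dockerfile above (python3 -m grpc_tools.protoc followed by a sed that rewrites the generated imports to be package-relative) can be reproduced outside the image build. A minimal Python sketch of those two steps, assuming a local folder holding the copied .proto files; the paths are illustrative, not the ones baked into the image:

import re
from pathlib import Path
from grpc_tools import protoc  # same tool the Dockerfile invokes as a module

PROTO_DIR = Path('.')  # illustrative: folder containing the copied *.proto files

# Step 1: generate message classes (*_pb2.py) and gRPC stubs (*_pb2_grpc.py).
for proto_file in sorted(PROTO_DIR.glob('*.proto')):
    exit_code = protoc.main([
        'grpc_tools.protoc', '-I=.', '--python_out=.', '--grpc_python_out=.',
        proto_file.name,
    ])
    assert exit_code == 0, 'protoc failed for {:s}'.format(proto_file.name)

# Step 2: rough equivalent of the sed call; rewrite the generated absolute
# "import x_pb2 ..." statements as relative imports so the modules work as
# a package (common/proto/...).
for generated in PROTO_DIR.glob('*_pb2*.py'):
    text = generated.read_text()
    text = re.sub(r'^(import .*_pb2)', r'from . \1', text, flags=re.MULTILINE)
    generated.write_text(text)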
+ +import grpc, logging +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc +from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId +from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceStub +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class DltConnectorClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.DLT) + if not port: port = get_service_port_grpc(ServiceNameEnum.DLT) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = DltConnectorServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def RecordAll(self, request : Empty) -> Empty: + LOGGER.debug('RecordAll request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordAll(request) + LOGGER.debug('RecordAll result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordAllDevices(self, request : Empty) -> Empty: + LOGGER.debug('RecordAllDevices request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordAllDevices(request) + LOGGER.debug('RecordAllDevices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordDevice(self, request : DeviceId) -> Empty: + LOGGER.debug('RecordDevice request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordDevice(request) + LOGGER.debug('RecordDevice result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordAllServices(self, request : Empty) -> Empty: + LOGGER.debug('RecordAllServices request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordAllServices(request) + LOGGER.debug('RecordAllServices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordService(self, request : ServiceId) -> Empty: + LOGGER.debug('RecordService request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordService(request) + LOGGER.debug('RecordService result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordAllSlices(self, request : Empty) -> Empty: + LOGGER.debug('RecordAllSlices request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordAllSlices(request) + LOGGER.debug('RecordAllSlices result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordSlice(self, request : SliceId) -> Empty: + LOGGER.debug('RecordSlice request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordSlice(request) + LOGGER.debug('RecordSlice result: 
{:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/dlt/connector/client/DltEventsCollector.py b/src/dlt/connector/client/DltEventsCollector.py new file mode 100644 index 0000000000000000000000000000000000000000..6fe2474cead37094c507a8a612181dc7f7243544 --- /dev/null +++ b/src/dlt/connector/client/DltEventsCollector.py @@ -0,0 +1,72 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging, queue, threading +from common.proto.dlt_gateway_pb2 import DltRecordSubscription +from common.tools.grpc.Tools import grpc_message_to_json_string +from dlt.connector.client.DltGatewayClient import DltGatewayClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +class DltEventsCollector: + def __init__( + self, dltgateway_client : DltGatewayClient, + log_events_received : bool = False, + ) -> None: + self._events_queue = queue.Queue() + self._log_events_received = log_events_received + subscription = DltRecordSubscription() # by default, subscribe to all + self._dltgateway_stream = dltgateway_client.SubscribeToDlt(subscription) + self._dltgateway_thread = self._create_collector_thread(self._dltgateway_stream) + + def _create_collector_thread(self, stream, as_daemon : bool = False): + return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon) + + def _collect(self, events_stream) -> None: + try: + for event in events_stream: + if self._log_events_received: + LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event))) + self._events_queue.put_nowait(event) + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member + raise # pragma: no cover + + def start(self): + if self._dltgateway_thread is not None: self._dltgateway_thread.start() + + def get_event(self, block : bool = True, timeout : float = 0.1): + try: + return self._events_queue.get(block=block, timeout=timeout) + except queue.Empty: # pylint: disable=catching-non-exception + return None + + def get_events(self, block : bool = True, timeout : float = 0.1, count : int = None): + events = [] + if count is None: + while True: + event = self.get_event(block=block, timeout=timeout) + if event is None: break + events.append(event) + else: + for _ in range(count): + event = self.get_event(block=block, timeout=timeout) + if event is None: continue + events.append(event) + return sorted(events, key=lambda e: e.event.timestamp.timestamp) + + def stop(self): + if self._dltgateway_stream is not None: self._dltgateway_stream.cancel() + if self._dltgateway_thread is not None: self._dltgateway_thread.join() diff --git a/src/dlt/connector/client/DltGatewayClient.py b/src/dlt/connector/client/DltGatewayClient.py new file mode 100644 index 0000000000000000000000000000000000000000..f1f8dec391bb836cea33422176730d250090429d --- /dev/null +++ b/src/dlt/connector/client/DltGatewayClient.py @@ -0,0 +1,84 @@ +# Copyright 2021-2023 H2020 TeraFlow
(https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Iterator +import grpc, logging +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_grpc +from common.proto.context_pb2 import Empty, TeraFlowController +from common.proto.dlt_gateway_pb2 import ( + DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription) +from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub +from common.tools.client.RetryDecorator import retry, delay_exponential +from common.tools.grpc.Tools import grpc_message_to_json_string + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') + +class DltGatewayClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.DLT) + if not port: port = get_service_port_grpc(ServiceNameEnum.DLT) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + self.stub = DltGatewayServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None + + @RETRY_DECORATOR + def RecordToDlt(self, request : DltRecord) -> DltRecordStatus: + LOGGER.debug('RecordToDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordToDlt(request) + LOGGER.debug('RecordToDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def GetFromDlt(self, request : DltRecordId) -> DltRecord: + LOGGER.debug('GetFromDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.GetFromDlt(request) + LOGGER.debug('GetFromDlt result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SubscribeToDlt(self, request : DltRecordSubscription) -> Iterator[DltRecordEvent]: + LOGGER.debug('SubscribeToDlt request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SubscribeToDlt(request) + # response is a server-streaming iterator, not a single message; do not JSON-serialize it + LOGGER.debug('SubscribeToDlt result: {:s}'.format(str(response))) + return response + + @RETRY_DECORATOR + def GetDltStatus(self, request : TeraFlowController) -> DltPeerStatus: + LOGGER.debug('GetDltStatus request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.GetDltStatus(request) + LOGGER.debug('GetDltStatus result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def GetDltPeers(self, request : Empty) -> DltPeerStatusList: + LOGGER.debug('GetDltPeers request:
{:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.GetDltPeers(request) + LOGGER.debug('GetDltPeers result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/dlt/connector/client/__init__.py b/src/dlt/connector/client/__init__.py index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644 --- a/src/dlt/connector/client/__init__.py +++ b/src/dlt/connector/client/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/src/dlt/connector/main_test.py b/src/dlt/connector/main_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad90eb35444b7ba4de00159372e466e8fc68905 --- /dev/null +++ b/src/dlt/connector/main_test.py @@ -0,0 +1,43 @@ +# pip install grpcio==1.47.0 grpcio-tools==1.47.0 protobuf==3.20.1 +# PYTHONPATH=/home/cttc/teraflow/src python -m dlt.connector.main_test + +import logging, sys, time +from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, DltRecord +from common.tools.object_factory.Device import json_device +from common.tools.grpc.Tools import grpc_message_to_json_string +from src.common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device +from .client.DltGatewayClient import DltGatewayClient +from .client.DltEventsCollector import DltEventsCollector + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +def main(): + dltgateway_client = DltGatewayClient(host='127.0.0.1', port=50051) + dltgateway_collector = DltEventsCollector(dltgateway_client, log_events_received=True) + dltgateway_collector.start() + + time.sleep(3) + + device = Device(**json_device('dev-1', 'packet-router', DEVICEOPERATIONALSTATUS_ENABLED)) + + r2dlt_req = DltRecord() + r2dlt_req.record_id.domain_uuid.uuid = 'tfs-a' + r2dlt_req.record_id.type = DLTRECORDTYPE_DEVICE + r2dlt_req.record_id.record_uuid.uuid = device.device_id.device_uuid.uuid + r2dlt_req.operation = DLTRECORDOPERATION_ADD + r2dlt_req.data_json = grpc_message_to_json_string(device) + LOGGER.info('r2dlt_req = {:s}'.format(grpc_message_to_json_string(r2dlt_req))) + r2dlt_rep = dltgateway_client.RecordToDlt(r2dlt_req) + LOGGER.info('r2dlt_rep = {:s}'.format(grpc_message_to_json_string(r2dlt_rep))) + + dlt2r_req = r2dlt_req.record_id + LOGGER.info('dlt2r_req = {:s}'.format(grpc_message_to_json_string(dlt2r_req))) + dlt2r_rep = dltgateway_client.GetFromDlt(dlt2r_req) + LOGGER.info('dlt2r_rep = {:s}'.format(grpc_message_to_json_string(dlt2r_rep))) + + dltgateway_collector.stop() + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/dlt/connector/requirements.in b/src/dlt/connector/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/dlt/connector/service/DltConnector.py b/src/dlt/connector/service/DltConnector.py new file mode 100644 index 0000000000000000000000000000000000000000..0c42d66852e8eb895a07c761f7535a0d768a9e91 --- /dev/null +++ b/src/dlt/connector/service/DltConnector.py @@ -0,0 +1,51 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, threading +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from dlt.connector.client.DltConnectorClient import DltConnectorClient + +LOGGER = logging.getLogger(__name__) + +class DltConnector: + def __init__(self) -> None: + LOGGER.debug('Creating connector...') + self._terminate = threading.Event() + self._thread = None + LOGGER.debug('Connector created') + + def start(self): + self._terminate.clear() + self._thread = threading.Thread(target=self._run_events_collector) + self._thread.start() + + def _run_events_collector(self) -> None: + dltconnector_client = DltConnectorClient() + context_client = ContextClient() + events_collector = EventsCollector(context_client) + events_collector.start() + + while not self._terminate.is_set(): + event = events_collector.get_event() + if event is None: continue # timed out waiting for an event; keep polling + LOGGER.info('Event from Context Received: {:s}'.format(grpc_message_to_json_string(event))) + + events_collector.stop() + context_client.close() + dltconnector_client.close() + + def stop(self): + self._terminate.set() + self._thread.join() diff --git a/src/dlt/connector/service/DltConnectorService.py b/src/dlt/connector/service/DltConnectorService.py new file mode 100644 index 0000000000000000000000000000000000000000..40237b628776f7053092b45d036072fbde35253c --- /dev/null +++ b/src/dlt/connector/service/DltConnectorService.py @@ -0,0 +1,28 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
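The `_run_events_collector()` loop in `DltConnector` above only logs the collected context events for now, and the `DltConnectorServiceServicerImpl` RPCs further below are still stubs returning `Empty()`. A hedged sketch of how the loop could eventually dispatch events to the connector RPCs; the event-type checks are an assumption based on the `DeviceEvent`/`ServiceEvent`/`SliceEvent` messages in `context.proto`, not part of this patch:

# Sketch only: dispatching is not implemented in this patch. It assumes the
# context EventsCollector yields DeviceEvent/ServiceEvent/SliceEvent messages
# as defined in context.proto; field names below follow those messages.
from common.proto.context_pb2 import DeviceEvent, ServiceEvent, SliceEvent
from dlt.connector.client.DltConnectorClient import DltConnectorClient

def dispatch_event(dltconnector_client : DltConnectorClient, event) -> None:
    if isinstance(event, DeviceEvent):
        dltconnector_client.RecordDevice(event.device_id)    # record device changes
    elif isinstance(event, ServiceEvent):
        dltconnector_client.RecordService(event.service_id)  # record service changes
    elif isinstance(event, SliceEvent):
        dltconnector_client.RecordSlice(event.slice_id)      # record slice changes
    # context/topology/link events are ignored in this sketch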
+ +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.service.GenericGrpcService import GenericGrpcService +from common.proto.dlt_connector_pb2_grpc import add_DltConnectorServiceServicer_to_server +from .DltConnectorServiceServicerImpl import DltConnectorServiceServicerImpl + +class DltConnectorService(GenericGrpcService): + def __init__(self, cls_name: str = __name__) -> None: + port = get_service_port_grpc(ServiceNameEnum.DLT) + super().__init__(port, cls_name=cls_name) + self.dltconnector_servicer = DltConnectorServiceServicerImpl() + + def install_servicers(self): + add_DltConnectorServiceServicer_to_server(self.dltconnector_servicer, self.server) diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..860e46f3ab88b097f4aa8e06508b19518055e46f --- /dev/null +++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py @@ -0,0 +1,62 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId +from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer + +LOGGER = logging.getLogger(__name__) + +SERVICE_NAME = 'DltConnector' +METHOD_NAMES = [ + 'RecordAll', + 'RecordAllDevices', 'RecordDevice', + 'RecordAllServices', 'RecordService', + 'RecordAllSlices', 'RecordSlice', +] +METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) + +class DltConnectorServiceServicerImpl(DltConnectorServiceServicer): + def __init__(self): + LOGGER.debug('Creating Servicer...') + LOGGER.debug('Servicer Created') + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordAll(self, request : Empty, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordAllDevices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordAllServices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordAllSlices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: + return Empty() diff --git a/src/dlt/connector/service/__init__.py 
b/src/dlt/connector/service/__init__.py index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644 --- a/src/dlt/connector/service/__init__.py +++ b/src/dlt/connector/service/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..435a93f61bf934a17d9c044756648176e9cb2d2d --- /dev/null +++ b/src/dlt/connector/service/__main__.py @@ -0,0 +1,65 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, + wait_for_environment_variables) +from .DltConnectorService import DltConnectorService + +terminate = threading.Event() +LOGGER : logging.Logger = None + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + global LOGGER # pylint: disable=global-statement + + log_level = get_log_level() + logging.basicConfig(level=log_level) + LOGGER = logging.getLogger(__name__) + + wait_for_environment_variables([ + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + ]) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + # Starting DLT connector service + grpc_service = DltConnectorService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=0.1): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/dlt/connector/tests/MockService_Dependencies.py b/src/dlt/connector/tests/MockService_Dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..65ddc3cb48cb878b2ab5ba8b5ec44479b0b71451 --- /dev/null +++ b/src/dlt/connector/tests/MockService_Dependencies.py @@ -0,0 +1,38 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import Union +from common.Constants import ServiceNameEnum +from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name +from common.proto.dlt_gateway_pb2_grpc import add_DltGatewayServiceServicer_to_server +from common.tests.MockServicerImpl_DltGateway import MockServicerImpl_DltGateway +from common.tools.service.GenericGrpcService import GenericGrpcService + +LOCAL_HOST = '127.0.0.1' + +SERVICE_DLT = ServiceNameEnum.DLT + +class MockService_Dependencies(GenericGrpcService): + def __init__(self, bind_port: Union[str, int]) -> None: + super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') + + # pylint: disable=attribute-defined-outside-init + def install_servicers(self): + self.dltgateway_servicer = MockServicerImpl_DltGateway() + add_DltGatewayServiceServicer_to_server(self.dltgateway_servicer, self.server) + + def configure_env_vars(self): + os.environ[get_env_var_name(SERVICE_DLT, ENVVAR_SUFIX_SERVICE_HOST )] = str(self.bind_address) + os.environ[get_env_var_name(SERVICE_DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port) diff --git a/src/dlt/connector/tests/Objects.py b/src/dlt/connector/tests/Objects.py new file mode 100644 index 0000000000000000000000000000000000000000..f797e93e6f2f4f6597a667fff61b2b8ba1cbd72a --- /dev/null +++ b/src/dlt/connector/tests/Objects.py @@ -0,0 +1,81 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
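`MockService_Dependencies` above acts as an in-process stand-in for the DLT gateway in the unit tests: `configure_env_vars()` publishes its bind address through the same environment variables that `DltGatewayClient` and `DltConnectorClient` resolve via `get_service_host()`/`get_service_port_grpc()`. A small usage sketch; the port number is arbitrary:

# Sketch: run the mock DLT gateway and let a client discover it via env vars.
from dlt.connector.client.DltGatewayClient import DltGatewayClient
from .MockService_Dependencies import MockService_Dependencies

mock_gateway = MockService_Dependencies(10000)  # any free, non-privileged port
mock_gateway.configure_env_vars()   # exports the DLT service host/port env vars
mock_gateway.start()

client = DltGatewayClient()         # picks up the env vars exported above
# ... exercise RecordToDlt / GetFromDlt / SubscribeToDlt against the mock ...
client.close()
mock_gateway.stop()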
+ +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id +from common.tools.object_factory.EndPoint import json_endpoints +from common.tools.object_factory.Link import compose_link +from common.tools.object_factory.Topology import json_topology, json_topology_id + +def compose_device( + device_uuid, endpoint_uuids, endpoint_type='copper', endpoint_topology_id=None, endpoint_sample_types=[] +): + device_id = json_device_id(device_uuid) + endpoints = [(endpoint_uuid, endpoint_type, endpoint_sample_types) for endpoint_uuid in endpoint_uuids] + endpoints = json_endpoints(device_id, endpoints, topology_id=endpoint_topology_id) + device = json_device_emulated_packet_router_disabled(device_uuid, endpoints=endpoints) + return device_id, endpoints, device + +# ===== Domain A ======================================================================================================= + +# ----- Context -------------------------------------------------------------------------------------------------------- +DA_CONTEXT_ADMIN_ID = json_context_id('A') +DA_CONTEXT_ADMIN = json_context('A') + +# ----- Topology ------------------------------------------------------------------------------------------------------- +DA_TOPOLOGY_ADMIN_ID = json_topology_id('A', context_id=DA_CONTEXT_ADMIN_ID) +DA_TOPOLOGY_ADMIN = json_topology('A', context_id=DA_CONTEXT_ADMIN_ID) + +# ----- Devices -------------------------------------------------------------------------------------------------------- +DA_DEVICE_DEV1_ID, DA_DEVICE_DEV1_ENDPOINTS, DA_DEVICE_DEV1 = compose_device('DEV1@A', ['1', '2']) +DA_DEVICE_DEV2_ID, DA_DEVICE_DEV2_ENDPOINTS, DA_DEVICE_DEV2 = compose_device('DEV2@A', ['1', '2']) +DA_DEVICE_DEV3_ID, DA_DEVICE_DEV3_ENDPOINTS, DA_DEVICE_DEV3 = compose_device('DEV3@A', ['1', '2']) + +# ----- Links ---------------------------------------------------------------------------------------------------------- +DA_LINK_DEV1_DEV2_ID, DA_LINK_DEV1_DEV2 = compose_link(DA_DEVICE_DEV1_ENDPOINTS[0], DA_DEVICE_DEV2_ENDPOINTS[0]) +DA_LINK_DEV1_DEV3_ID, DA_LINK_DEV1_DEV3 = compose_link(DA_DEVICE_DEV1_ENDPOINTS[1], DA_DEVICE_DEV3_ENDPOINTS[0]) +DA_LINK_DEV2_DEV3_ID, DA_LINK_DEV2_DEV3 = compose_link(DA_DEVICE_DEV2_ENDPOINTS[1], DA_DEVICE_DEV3_ENDPOINTS[1]) + +# ----- Containers ----------------------------------------------------------------------------------------------------- +DA_CONTEXTS = [DA_CONTEXT_ADMIN] +DA_TOPOLOGIES = [DA_TOPOLOGY_ADMIN] +DA_DEVICES = [DA_DEVICE_DEV1, DA_DEVICE_DEV2, DA_DEVICE_DEV3] +DA_LINKS = [DA_LINK_DEV1_DEV2, DA_LINK_DEV1_DEV3, DA_LINK_DEV2_DEV3] + + +# ===== Domain B ======================================================================================================= + +# ----- Context -------------------------------------------------------------------------------------------------------- +DB_CONTEXT_ADMIN_ID = json_context_id('B') +DB_CONTEXT_ADMIN = json_context('B') + +# ----- Topology ------------------------------------------------------------------------------------------------------- +DB_TOPOLOGY_ADMIN_ID = json_topology_id('B', context_id=DB_CONTEXT_ADMIN_ID) +DB_TOPOLOGY_ADMIN = json_topology('B', context_id=DB_CONTEXT_ADMIN_ID) + +# ----- Devices -------------------------------------------------------------------------------------------------------- +DB_DEVICE_DEV1_ID, DB_DEVICE_DEV1_ENDPOINTS, DB_DEVICE_DEV1 = compose_device('DEV1@B', ['1', '2']) 
+DB_DEVICE_DEV2_ID, DB_DEVICE_DEV2_ENDPOINTS, DB_DEVICE_DEV2 = compose_device('DEV2@B', ['1', '2']) +DB_DEVICE_DEV3_ID, DB_DEVICE_DEV3_ENDPOINTS, DB_DEVICE_DEV3 = compose_device('DEV3@B', ['1', '2']) + +# ----- Links ---------------------------------------------------------------------------------------------------------- +DB_LINK_DEV1_DEV2_ID, DB_LINK_DEV1_DEV2 = compose_link(DB_DEVICE_DEV1_ENDPOINTS[0], DB_DEVICE_DEV2_ENDPOINTS[0]) +DB_LINK_DEV1_DEV3_ID, DB_LINK_DEV1_DEV3 = compose_link(DB_DEVICE_DEV1_ENDPOINTS[1], DB_DEVICE_DEV3_ENDPOINTS[0]) +DB_LINK_DEV2_DEV3_ID, DB_LINK_DEV2_DEV3 = compose_link(DB_DEVICE_DEV2_ENDPOINTS[1], DB_DEVICE_DEV3_ENDPOINTS[1]) + +# ----- Containers ----------------------------------------------------------------------------------------------------- +DB_CONTEXTS = [DB_CONTEXT_ADMIN] +DB_TOPOLOGIES = [DB_TOPOLOGY_ADMIN] +DB_DEVICES = [DB_DEVICE_DEV1, DB_DEVICE_DEV2, DB_DEVICE_DEV3] +DB_LINKS = [DB_LINK_DEV1_DEV2, DB_LINK_DEV1_DEV3, DB_LINK_DEV2_DEV3] diff --git a/src/dlt/connector/tests/PrepareTestScenario.py b/src/dlt/connector/tests/PrepareTestScenario.py new file mode 100644 index 0000000000000000000000000000000000000000..5c5d1cb5cc1c6868a5b47d929f026deecbe52f52 --- /dev/null +++ b/src/dlt/connector/tests/PrepareTestScenario.py @@ -0,0 +1,109 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
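The `compose_device()` helper in `Objects.py` above returns plain JSON dicts (device id, endpoint descriptors, device), which the tests later wrap into gRPC messages. A quick sketch of that round trip; the device name `DEV9@A` is made up for illustration:

# Sketch: compose_device() yields JSON dicts ready to become gRPC messages.
from common.proto.context_pb2 import Device
from .Objects import compose_device

dev_id, dev_endpoints, dev = compose_device('DEV9@A', ['1', '2'])
device = Device(**dev)   # dict -> context_pb2.Device, same pattern as the tests
print(device.device_id.device_uuid.uuid)  # -> 'DEV9@A'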
+ +import os, pytest +from typing import Tuple +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc) +from common.orm.Database import Database +from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum +from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum +from common.message_broker.MessageBroker import MessageBroker +from context.client.ContextClient import ContextClient +from context.service.grpc_server.ContextService import ContextService +from dlt.connector.client.DltConnectorClient import DltConnectorClient +from dlt.connector.service.DltConnectorService import DltConnectorService +from .MockService_Dependencies import MockService_Dependencies + +LOCAL_HOST = '127.0.0.1' +MOCKSERVICE_PORT = 10000 +#GRPC_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.CONTEXT) # avoid privileged ports +#os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +#os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT) + +# ===== BlockChain Emulator (Mock DLT Gateway) ========================================================================= +# A single gateway is used for all the domains + +@pytest.fixture(scope='session') +def dltgateway_service(): + _service = MockService_Dependencies(MOCKSERVICE_PORT) + _service.configure_env_vars() + _service.start() + yield _service + _service.stop() + +# ===== Domain A (Real Context + Real DLT Connector) =================================================================== + +@pytest.fixture(scope='session') +def context_service_a(): # pylint: disable=redefined-outer-name + _database = Database(get_database_backend(backend=DatabaseBackendEnum.INMEMORY)) + _message_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) + _service = ContextService(_database, _message_broker) + _service.start() + yield _service + _service.stop() + _message_broker.terminate() + +@pytest.fixture(scope='session') +def context_client_a(context_service_a : ContextService): # pylint: disable=redefined-outer-name + _client = ContextClient(host=context_service_a.bind_address, port=context_service_a.bind_port) + yield _client + _client.close() + +@pytest.fixture(scope='session') +def dltconnector_service_a(): + _service = DltConnectorService() + _service.bind_port += 1 + _service.start() + yield _service + _service.stop() + +@pytest.fixture(scope='session') +def dltconnector_client_a(dltconnector_service_a : DltConnectorService): # pylint: disable=redefined-outer-name + _client = DltConnectorClient(host=dltconnector_service_a.bind_address, port=dltconnector_service_a.bind_port) + yield _client + _client.close() + +# ===== Domain B (Real Context + Real DLT Connector) =================================================================== + +@pytest.fixture(scope='session') +def context_service_b(): # pylint: disable=redefined-outer-name + _database = Database(get_database_backend(backend=DatabaseBackendEnum.INMEMORY)) + _message_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY)) + _service = ContextService(_database, _message_broker) + _service.start() + yield _service + _service.stop() + _message_broker.terminate() + +@pytest.fixture(scope='session') +def context_client_b(context_service_b : ContextService): # pylint: 
disable=redefined-outer-name + _client = ContextClient(host=context_service_b.bind_address, port=context_service_b.bind_port) + yield _client + _client.close() + +@pytest.fixture(scope='session') +def dltconnector_service_b(): + _service = DltConnectorService() + _service.bind_port += 2 + _service.start() + yield _service + _service.stop() + +@pytest.fixture(scope='session') +def dltconnector_client_b(dltconnector_service_b : DltConnectorService): # pylint: disable=redefined-outer-name + _client = DltConnectorClient(host=dltconnector_service_b.bind_address, port=dltconnector_service_b.bind_port) + yield _client + _client.close() diff --git a/src/dlt/connector/tests/__init__.py b/src/dlt/connector/tests/__init__.py index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644 --- a/src/dlt/connector/tests/__init__.py +++ b/src/dlt/connector/tests/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/src/dlt/connector/tests/test_unitary.py b/src/dlt/connector/tests/test_unitary.py new file mode 100644 index 0000000000000000000000000000000000000000..00c1164e1becde1d56de2a6c53c51160a31fc6f7 --- /dev/null +++ b/src/dlt/connector/tests/test_unitary.py @@ -0,0 +1,54 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Link, LinkId, Topology, TopologyId +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltGatewayClient import DltGatewayClient +from dlt.connector.client.DltEventsCollector import DltEventsCollector +from .PrepareTestScenario import ( + # pylint: disable=unused-import + dltgateway_service, + context_service_a, context_client_a, dltconnector_service_a, dltconnector_client_a, + context_service_b, context_client_b, dltconnector_service_b, dltconnector_client_b) +from .Objects import ( + DA_CONTEXTS, DA_TOPOLOGIES, DA_DEVICES, DA_LINKS) + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_create_events( + dltgateway_service, # pylint: disable=redefined-outer-name + context_client_a : ContextClient): # pylint: disable=redefined-outer-name + + # Collect the events generated by the (mock) DLT gateway; the client resolves + # host/port from the environment variables exported by the dltgateway_service fixture. + dltgateway_client = DltGatewayClient() + dltgateway_collector = DltEventsCollector(dltgateway_client, log_events_received=True) + dltgateway_collector.start() + + for context in DA_CONTEXTS : context_client_a.SetContext (Context (**context )) + for topology in DA_TOPOLOGIES: context_client_a.SetTopology(Topology(**topology)) + for device in DA_DEVICES : context_client_a.SetDevice (Device (**device )) + for link in DA_LINKS : context_client_a.SetLink (Link (**link )) + + for link in DA_LINKS : context_client_a.RemoveLink (LinkId (**link ['link_id' ])) + for device in DA_DEVICES : context_client_a.RemoveDevice (DeviceId (**device ['device_id' ])) + for topology in DA_TOPOLOGIES: context_client_a.RemoveTopology(TopologyId(**topology['topology_id'])) + for context in DA_CONTEXTS : context_client_a.RemoveContext (ContextId (**context ['context_id' ])) + + dltgateway_collector.stop() diff --git a/src/dlt/gateway/.gitignore b/src/dlt/gateway/.gitignore index 1de6c650e4e3891fba0a81d585634f635e03a5c4..9ecdb254cd217f06171ac30934a34f898a7d77dc 100644 --- a/src/dlt/gateway/.gitignore +++ b/src/dlt/gateway/.gitignore @@ -87,4 +87,4 @@ gradle-app.setting .gradletasknamecache local.properties -wallet/ \ No newline at end of file +wallet*/ \ No newline at end of file diff --git a/src/dlt/gateway/Dockerfile b/src/dlt/gateway/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..92ef8e425f40eaf718c4562c836517128dbb2d6f --- /dev/null +++ b/src/dlt/gateway/Dockerfile @@ -0,0 +1,41 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM zenika/kotlin:1.4-jdk12 + +# Make the working directory, move to it, and copy the DLT Gateway code +RUN mkdir -p /var/teraflow/dlt/gateway +WORKDIR /var/teraflow/dlt/gateway +COPY src/dlt/gateway/.
./ + +# Make directory for proto files and copy them +RUN mkdir proto +COPY proto/*.proto ./proto/ + +# Build DLT Gateway +RUN ./gradlew build + +EXPOSE 50051 + +# Create entrypoint.sh script +RUN echo "#!/bin/sh" > /entrypoint.sh +RUN echo "echo 195.37.154.24 peer0.org1.example.com >> /etc/hosts" >> /entrypoint.sh +RUN echo "echo 195.37.154.24 peer0.org2.example.com >> /etc/hosts" >> /entrypoint.sh +RUN echo "echo 195.37.154.24 orderer0.example.com >> /etc/hosts" >> /entrypoint.sh +RUN echo "cd /var/teraflow/dlt/gateway" >> /entrypoint.sh +RUN echo "./gradlew runServer" >> /entrypoint.sh +RUN chmod +x /entrypoint.sh + +# Gateway entry point +ENTRYPOINT ["sh", "/entrypoint.sh"] diff --git a/src/dlt/gateway/README.md b/src/dlt/gateway/README.md index 361de07c6a35fb7951f063a9aa6fc3fb28d3ba0d..2cf6cfeb1682ade5a77f53fe13c96daed6dc33fd 100644 --- a/src/dlt/gateway/README.md +++ b/src/dlt/gateway/README.md @@ -13,7 +13,7 @@ duplication of the object or source code - either totally or in part - is strictly prohibited. - Copyright (c) 2021 NEC Laboratories Europe GmbH + Copyright (c) 2022 NEC Laboratories Europe GmbH All Rights Reserved. Authors: Konstantin Munichev @@ -42,7 +42,7 @@ ## General information The DLT module is used to provide access to the underlying Fabric deployment. It allows clients to add, retrieve, modify and delete blockchain-backed data, essentially working as a key-value -database. External clients should use REST API to communicate with this service, its detailed +database. External clients should use the gRPC API to communicate with this service; its detailed description available below. ## Code structure @@ -59,26 +59,76 @@ CRUD interface. Other files contain auxiliary code for `FabricConnector` which allows it to register/enroll users and to obtain smart contract instances. -### HTTP package -Contains server side HTTP handler. It accepts requests from the outside and performs the -requested operation. For the detailed description see API description section. +### Grpc package +Contains server side gRPC handler. It accepts requests from the outside and performs the +requested operation. For a more detailed description, see the Proto package description right below. ### Proto package -The proto package contains `Config.proto` file which contains messages for REST API. The most -important ones are `DltConfig` (it defines the whole DLT configuration) and `DltRecord` which -represents data to store in the blockchain. +The proto package contains the `dlt.proto` file, which defines the gRPC service `DltService` API and the messages +it uses. There are 3 main functions: `RecordToDlt`, which allows clients to create/modify/delete data; +`GetFromDlt`, which returns already written data; and `SubscribeToDlt`, which allows clients to subscribe +to future create/modify/delete events with the provided filters. +Other proto files don't play any significant role and can be safely ignored by end users. ### Client example This code is not necessary to the service, but it could be used to test the service. It contains
- -## REST API description -| Method | URL | Input | Response code | Output | -| --- | ----------- | --- | --- | --- | -| POST | /dlt/configure | Configuration object | 201 or 400 | Status value | -| GET | /dlt/configure | - | 200 or 404 | Configuration object | -| POST | /dlt/record | Record object | 200, 201, 400 or 404 | Status value | -| GET | /dlt/record | Record id | 200 or 404 | Record object | - -Record and configuration object are defined in `proto` package. - +a sample gRPC client which connects to the service and performs all the CRUD operations. + +# Fabric deployment notes + +## General notes +The current Fabric deployment uses the Fabric test network with some additional helper scripts on top of it. +To start the network, just run `raft.sh` from the `blockchain/scripts` directory. Use `stop.sh` +when you need to stop the network. + +## Server start preparations +To run the server, it's necessary to copy the certificate file +`fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/ca/ca.org1.example.com-cert.pem` +to the config folder (replacing the existing one). Also, it's necessary to copy `scripts/connection-org1.json` +file (again, replacing the old one). After copying, it must be edited. First, all `localhost` entries +should be replaced with `teraflow.nlehd.de`. Second, the `channel` section at the end of the file should be removed. +This should be done after every restart of the Fabric network. + +## Fabric configuration +Even though a test network is easy to deploy and use, it's better to perform a custom configuration +for a production deployment. In practice, every participating organization will likely prefer to have +its own Peer/Orderer/CA instances to prevent possible dependencies on any other participants. This leads +not only to better privacy/availability/security in general but also, as a side effect, to a more complicated +deployment process. Here we provide a very brief description of the most important points. + +### Organizations +An organization represents a network participant, which can be an individual, a large corporation or any other +entity. Each organization has its own CAs, orderers and peers. The recommendation here is to create an +organization entity for every independent participant and then decide how many CAs/peers/orderers +every organization needs and which channels it should have access to, based on the exact project's goals. + +### Channels +Each channel represents an independent ledger with its own genesis block. Each transaction is executed +on a specific channel, and it's possible to define which organization has access to a given channel. +As a result, channels are a pretty powerful privacy mechanism that allows limiting access to private +data between organizations. + +### Certificate authorities, peers and orderers +Certificate authorities (CAs) are used to generate crypto materials for each organization. Two types of CA +exist: one is used to generate the certificates of the admin, the MSP and certificates of non-admin users. +Another type of CA is used to generate TLS certificates. As a result, it's preferable to have at least two +CAs for every organization. + +Peers are entities which host ledgers and smart contracts. They communicate with applications and orderers, +receiving chaincode invocations (proposals), invoking chaincode, updating the ledger when necessary and +returning the result of execution. Peers can handle one or many ledgers, depending on the configuration.
It's +very use-case specific how many peers are necessary for the exact deployment. + +Orderers are used to execute consensus in a distributed network, making sure that every channel participant +has the same blocks with the same data. The default consensus algorithm is Raft, which provides only crash +fault tolerance. + +### Conclusion +As you can see, the configuration procedure for Fabric is pretty tricky and includes quite a lot of entities. +In the real world it will very likely involve participants from multiple organizations, each performing +its own part of the configuration. + +As further reading, it's recommended to start with the +[official deployment guide](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployment_guide_overview.html). +It contains a high-level overview of the deployment process as well as links to the detailed +CA/Peer/Orderer configuration descriptions. \ No newline at end of file diff --git a/src/dlt/gateway/build.gradle.kts b/src/dlt/gateway/build.gradle.kts index 8eb0d53fa99ec972edd6ee03aafeb0d676f3d3c0..b65aff89e18077ffaff37ea732293f585ca7920d 100644 --- a/src/dlt/gateway/build.gradle.kts +++ b/src/dlt/gateway/build.gradle.kts @@ -39,16 +39,19 @@ import org.jetbrains.kotlin.gradle.tasks.KotlinCompile import com.google.protobuf.gradle.generateProtoTasks import com.google.protobuf.gradle.id +import com.google.protobuf.gradle.plugins import com.google.protobuf.gradle.protobuf import com.google.protobuf.gradle.protoc -ext["protobufVersion"] = "3.19.1" +ext["grpcVersion"] = "1.47.0" +ext["grpcKotlinVersion"] = "1.3.0" // CURRENT_GRPC_KOTLIN_VERSION +ext["protobufVersion"] = "3.20.1" ext["ktorVersion"] = "1.6.5" plugins { - kotlin("jvm") version "1.5.31" + kotlin("jvm") version "1.6.21" kotlin("plugin.serialization") version "1.4.21" - id("com.google.protobuf") version "0.8.17" + id("com.google.protobuf") version "0.8.18" application } @@ -56,22 +59,24 @@ group = "eu.neclab" version = "1.0-SNAPSHOT" repositories { + mavenLocal() + google() mavenCentral() } dependencies { - testImplementation("org.jetbrains.kotlin:kotlin-test:1.5.31") - implementation("org.hyperledger.fabric:fabric-gateway-java:2.2.2") - api("com.google.protobuf:protobuf-kotlin:${rootProject.ext["protobufVersion"]}") - implementation("io.ktor:ktor-server-core:${rootProject.ext["ktorVersion"]}") - implementation("io.ktor:ktor-server-netty:${rootProject.ext["ktorVersion"]}") - implementation("io.ktor:ktor-serialization:${rootProject.ext["ktorVersion"]}") - implementation("io.ktor:ktor-client-serialization:${rootProject.ext["ktorVersion"]}") - implementation("io.ktor:ktor-client-core:${rootProject.ext["ktorVersion"]}") - implementation("io.ktor:ktor-client-cio:${rootProject.ext["ktorVersion"]}") - implementation("ch.qos.logback:logback-classic:1.2.5") + implementation(kotlin("stdlib-jdk8")) + testImplementation("org.jetbrains.kotlin:kotlin-test:1.6.21") + implementation("javax.annotation:javax.annotation-api:1.3.2") + implementation("io.grpc:grpc-kotlin-stub:1.3.0") + implementation("io.grpc:grpc-protobuf:1.47.0") + implementation("com.google.protobuf:protobuf-kotlin:3.21.1") + implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.3") + implementation("org.hyperledger.fabric:fabric-gateway-java:2.2.5") + implementation("ch.qos.logback:logback-classic:1.2.11") implementation("org.jetbrains.kotlinx:kotlinx-serialization-json:1.3.1") + implementation("org.jetbrains.kotlinx:kotlinx-serialization-protobuf:1.3.1") +
runtimeOnly("io.grpc:grpc-netty:${rootProject.ext["grpcVersion"]}") } tasks.test { @@ -93,23 +98,50 @@ application { mainClass.set("MainKt") } +task("runServer", JavaExec::class) { + main = "grpc.FabricServerKt" + classpath = sourceSets["main"].runtimeClasspath +} + + sourceSets { main { proto { + srcDir("proto") srcDir("src/main/kotlin/proto") } } } +sourceSets { + val main by getting { } + main.java.srcDirs("build/generated/source/proto/main/grpc") + main.java.srcDirs("build/generated/source/proto/main/grpckt") + main.java.srcDirs("build/generated/source/proto/main/java") + main.java.srcDirs("build/generated/source/proto/main/kotlin") +} + protobuf { protoc { artifact = "com.google.protobuf:protoc:${rootProject.ext["protobufVersion"]}" } + plugins { + id("grpc") { + artifact = "io.grpc:protoc-gen-grpc-java:${rootProject.ext["grpcVersion"]}" + } + id("grpckt") { + artifact = "io.grpc:protoc-gen-grpc-kotlin:${rootProject.ext["grpcKotlinVersion"]}:jdk8@jar" + } + } generateProtoTasks { all().forEach { + it.plugins { + id("grpc") + id("grpckt") + } it.builtins { id("kotlin") } } } -} \ No newline at end of file +} diff --git a/src/dlt/gateway/config/ca.org1.example.com-cert.pem b/src/dlt/gateway/config/ca.org1.example.com-cert.pem index 5287a0f2bada9649c0d2ddd8eb8a71b2dac10df7..d7fdf63cc3f745d13edc8394bca67a1b41011ed2 100644 --- a/src/dlt/gateway/config/ca.org1.example.com-cert.pem +++ b/src/dlt/gateway/config/ca.org1.example.com-cert.pem @@ -1,14 +1,14 @@ -----BEGIN CERTIFICATE----- -MIICJjCCAc2gAwIBAgIUWZ4l32loO9+FM0FYw61y3dUF5a0wCgYIKoZIzj0EAwIw +MIICJzCCAc2gAwIBAgIUb5gDMfVeVdQjFkK3uC8LtlogN+gwCgYIKoZIzj0EAwIw cDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH EwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh -Lm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwNzA1MDk0NDAwWhcNMzcwNzAxMDk0NDAw +Lm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwOTI3MDgzMDAwWhcNMzcwOTIzMDgzMDAw WjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV BAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT -Y2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNPg -yfDxHr4ZmFp3HB19f27vfc1YTKBnznLqIFwVad2Y+eXfni8DnTRNGgwdkG9uIK2L -4Y9mwlKG/mTNx629G4GjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG -AQH/AgEBMB0GA1UdDgQWBBSZlT6qe+DAGpEBXyMxzidqCkQ4PjAKBggqhkjOPQQD -AgNHADBEAiAIG5jwBGddB9CwocmjAzFv8+e7+0bvNSwjrG229QogTgIgbTNoC33P -mbR5ChlkUAW2t41hTOCSMIwLAlvEwpeCnAk= +Y2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDC3 +spCTT3pjfFXxkX/SFuBgWRiceR8rSoCNQOnIPeNGZK8xl2Zr7VuY06gqy9c+ecSU +PUWaXiCQxiLgZuS6TOWjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG +AQH/AgEBMB0GA1UdDgQWBBRFWSc7GZqcJJyJjXSEspzgAYInGzAKBggqhkjOPQQD +AgNIADBFAiEAodqc+adkiMuU6iv1IF8uJ/nMQbvMGoP3pb2827QzDosCICOw6W+y +uH03H3RO6KhOcS1ZzPjspyjrcC+dwzYX4DpW -----END CERTIFICATE----- diff --git a/src/dlt/gateway/config/connection-org1.json b/src/dlt/gateway/config/connection-org1.json index 320a20806650edaea9de45d0e208bc2b8dea12f0..6f6f3f08d65c495bb57551f6d0bfac38c9a2f8cc 100644 --- a/src/dlt/gateway/config/connection-org1.json +++ b/src/dlt/gateway/config/connection-org1.json @@ -24,9 +24,9 @@ }, "peers": { "peer0.org1.example.com": { - "url": "grpcs://s2:7051", + "url": "grpcs://teraflow.nlehd.de:7051", "tlsCACerts": { - "pem": "-----BEGIN 
CERTIFICATE-----\nMIICJjCCAc2gAwIBAgIUWZ4l32loO9+FM0FYw61y3dUF5a0wCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwNzA1MDk0NDAwWhcNMzcwNzAxMDk0NDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNPg\nyfDxHr4ZmFp3HB19f27vfc1YTKBnznLqIFwVad2Y+eXfni8DnTRNGgwdkG9uIK2L\n4Y9mwlKG/mTNx629G4GjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBSZlT6qe+DAGpEBXyMxzidqCkQ4PjAKBggqhkjOPQQD\nAgNHADBEAiAIG5jwBGddB9CwocmjAzFv8+e7+0bvNSwjrG229QogTgIgbTNoC33P\nmbR5ChlkUAW2t41hTOCSMIwLAlvEwpeCnAk=\n-----END CERTIFICATE-----\n" + "pem": "-----BEGIN CERTIFICATE-----\nMIICJzCCAc2gAwIBAgIUb5gDMfVeVdQjFkK3uC8LtlogN+gwCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwOTI3MDgzMDAwWhcNMzcwOTIzMDgzMDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDC3\nspCTT3pjfFXxkX/SFuBgWRiceR8rSoCNQOnIPeNGZK8xl2Zr7VuY06gqy9c+ecSU\nPUWaXiCQxiLgZuS6TOWjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBRFWSc7GZqcJJyJjXSEspzgAYInGzAKBggqhkjOPQQD\nAgNIADBFAiEAodqc+adkiMuU6iv1IF8uJ/nMQbvMGoP3pb2827QzDosCICOw6W+y\nuH03H3RO6KhOcS1ZzPjspyjrcC+dwzYX4DpW\n-----END CERTIFICATE-----\n" }, "grpcOptions": { "ssl-target-name-override": "peer0.org1.example.com", @@ -34,9 +34,9 @@ } }, "peer0.org2.example.com": { - "url": "grpcs://s2:9051", + "url": "grpcs://teraflow.nlehd.de:9051", "tlsCACerts": { - "pem": "-----BEGIN CERTIFICATE-----\nMIICHzCCAcWgAwIBAgIUejv57h6dJkVIM2R1YnlqykkvG7gwCgYIKoZIzj0EAwIw\nbDELMAkGA1UEBhMCVUsxEjAQBgNVBAgTCUhhbXBzaGlyZTEQMA4GA1UEBxMHSHVy\nc2xleTEZMBcGA1UEChMQb3JnMi5leGFtcGxlLmNvbTEcMBoGA1UEAxMTY2Eub3Jn\nMi5leGFtcGxlLmNvbTAeFw0yMjA3MDUwOTQ0MDBaFw0zNzA3MDEwOTQ0MDBaMGwx\nCzAJBgNVBAYTAlVLMRIwEAYDVQQIEwlIYW1wc2hpcmUxEDAOBgNVBAcTB0h1cnNs\nZXkxGTAXBgNVBAoTEG9yZzIuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2NhLm9yZzIu\nZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASFZqoisCIgZyMM\n8e0YBA+jxH/+Fc4Y4OkEl5uGRXGl9s0OemCdvhlX9K+esX2DVk1st1PMfTEj/six\n9XPpVqzNo0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBATAd\nBgNVHQ4EFgQUPEyzGBlZEjguoJB16wAmoH2bAh8wCgYIKoZIzj0EAwIDSAAwRQIh\nAL6DAWgrqRtbYoQ0oYAr/2vze0JtQcXoqiQKlyvYkUBbAiB/uSHBk3NwjzI8t8iW\nzQzr5eNy5JwOO0SWwPEv4Ev9iQ==\n-----END CERTIFICATE-----\n" + "pem": "-----BEGIN CERTIFICATE-----\nMIICHjCCAcWgAwIBAgIUL48scgv9ItATkBjSNhzYDjLUDsAwCgYIKoZIzj0EAwIw\nbDELMAkGA1UEBhMCVUsxEjAQBgNVBAgTCUhhbXBzaGlyZTEQMA4GA1UEBxMHSHVy\nc2xleTEZMBcGA1UEChMQb3JnMi5leGFtcGxlLmNvbTEcMBoGA1UEAxMTY2Eub3Jn\nMi5leGFtcGxlLmNvbTAeFw0yMjA5MjcwODMwMDBaFw0zNzA5MjMwODMwMDBaMGwx\nCzAJBgNVBAYTAlVLMRIwEAYDVQQIEwlIYW1wc2hpcmUxEDAOBgNVBAcTB0h1cnNs\nZXkxGTAXBgNVBAoTEG9yZzIuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2NhLm9yZzIu\nZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ5qz8FfrEQ5S08\nr/avPyTrF2grXj5L4DnbvF4YEZ5Usnbm8Svovu7PO8uiVcwT5vrt6ssOdpBFZYu3\nNndpojnYo0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBATAd\nBgNVHQ4EFgQUYcp7axYV9AaIptYQqhiCL0VDmXQwCgYIKoZIzj0EAwIDRwAwRAIg\nWT1V8/6flUPNcBkmbtEEKf83k7+6sR9k1a2wtVeJFnQCIE0ZSIL3k0dKQydQBpiz\nPcZZUULvQivcMlIsw5+mjIGc\n-----END CERTIFICATE-----\n" }, "grpcOptions": { "ssl-target-name-override": 
"peer0.org2.example.com", @@ -46,11 +46,11 @@ }, "certificateAuthorities": { "ca.org1.example.com": { - "url": "https://s2:7054", + "url": "https://teraflow.nlehd.de:7054", "caName": "ca-org1", "tlsCACerts": { "pem": [ - "-----BEGIN CERTIFICATE-----\nMIICJjCCAc2gAwIBAgIUWZ4l32loO9+FM0FYw61y3dUF5a0wCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwNzA1MDk0NDAwWhcNMzcwNzAxMDk0NDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNPg\nyfDxHr4ZmFp3HB19f27vfc1YTKBnznLqIFwVad2Y+eXfni8DnTRNGgwdkG9uIK2L\n4Y9mwlKG/mTNx629G4GjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBSZlT6qe+DAGpEBXyMxzidqCkQ4PjAKBggqhkjOPQQD\nAgNHADBEAiAIG5jwBGddB9CwocmjAzFv8+e7+0bvNSwjrG229QogTgIgbTNoC33P\nmbR5ChlkUAW2t41hTOCSMIwLAlvEwpeCnAk=\n-----END CERTIFICATE-----\n" + "-----BEGIN CERTIFICATE-----\nMIICJzCCAc2gAwIBAgIUb5gDMfVeVdQjFkK3uC8LtlogN+gwCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwOTI3MDgzMDAwWhcNMzcwOTIzMDgzMDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDC3\nspCTT3pjfFXxkX/SFuBgWRiceR8rSoCNQOnIPeNGZK8xl2Zr7VuY06gqy9c+ecSU\nPUWaXiCQxiLgZuS6TOWjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBRFWSc7GZqcJJyJjXSEspzgAYInGzAKBggqhkjOPQQD\nAgNIADBFAiEAodqc+adkiMuU6iv1IF8uJ/nMQbvMGoP3pb2827QzDosCICOw6W+y\nuH03H3RO6KhOcS1ZzPjspyjrcC+dwzYX4DpW\n-----END CERTIFICATE-----\n" ] }, "httpOptions": { @@ -60,9 +60,9 @@ }, "orderers": { "orderer0.example.com": { - "url": "grpcs://s2:7050", + "url": "grpcs://teraflow.nlehd.de:7050", "tlsCACerts": { - "pem": "-----BEGIN CERTIFICATE-----\nMIICCjCCAbGgAwIBAgIURV0KgZTOagIAIU7wRcSg/mKl5RUwCgYIKoZIzj0EAwIw\nYjELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcg\nWW9yazEUMBIGA1UEChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUu\nY29tMB4XDTIyMDcwNTA5NDQwMFoXDTM3MDcwMTA5NDQwMFowYjELMAkGA1UEBhMC\nVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcgWW9yazEUMBIGA1UE\nChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUuY29tMFkwEwYHKoZI\nzj0CAQYIKoZIzj0DAQcDQgAEWrOugtJgVLAKZRw9jaC15RUbVuTm0ZmsqNyiQrKQ\nYawLE6fs+QIU7WQ25fxlYtmGB2S8nofGCDuwaoTevW0GoaNFMEMwDgYDVR0PAQH/\nBAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFLKBzGXaQg2Irr57\npjoFYZ9F1NoNMAoGCCqGSM49BAMCA0cAMEQCIB1YdgOEsATw2GeaFmq6nqWg0JDT\np456JB/reFmnPWdJAiBPo5H9sMh+MpP4R5ue7nuwYK7SEJ1DOJqWMlPuNhVgtA==\n-----END CERTIFICATE-----\n" + "pem": "-----BEGIN 
CERTIFICATE-----\nMIICCzCCAbGgAwIBAgIUdZQo3q4OqyxIkidmAV4QkewCylIwCgYIKoZIzj0EAwIw\nYjELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcg\nWW9yazEUMBIGA1UEChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUu\nY29tMB4XDTIyMDkyNzA4MzAwMFoXDTM3MDkyMzA4MzAwMFowYjELMAkGA1UEBhMC\nVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcgWW9yazEUMBIGA1UE\nChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUuY29tMFkwEwYHKoZI\nzj0CAQYIKoZIzj0DAQcDQgAERR0UzsHSFoyON+9Noxmk1IhnTvSdLWGgEpEwrqVr\n5DwitkeJwRWq134JBTmXuZzsUG87oN6Hr94XAEe4j9Zq8qNFMEMwDgYDVR0PAQH/\nBAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFN8XsELp/X0akrlJ\nY3/BWo2jZS3cMAoGCCqGSM49BAMCA0gAMEUCIQCZYYXW/0h3Kq4BmROpOHfrondg\nopf5LndeujYlH3i8tQIgCtpTQiDXZd+IAUduRmn7a46CwJSbjYbXFVX5vumIbE4=\n-----END CERTIFICATE-----\n" }, "grpcOptions": { "ssl-target-name-override": "orderer0.example.com", diff --git a/src/dlt/gateway/settings.gradle.kts b/src/dlt/gateway/settings.gradle.kts index 0ebdd07b29682c72c65695e4f5655437ed11d74d..67683a7440a06dd490a07dce3e3858ec8242f1ea 100644 --- a/src/dlt/gateway/settings.gradle.kts +++ b/src/dlt/gateway/settings.gradle.kts @@ -1,3 +1,3 @@ -rootProject.name = "dlt" +rootProject.name = "gateway" diff --git a/src/dlt/gateway/src/main/kotlin/Main.kt b/src/dlt/gateway/src/main/kotlin/Main.kt index 68a820ee9feb622a5f3d8429dd56b905b0e1a1b4..c57c9e980853e84d3c10551588dc8d94c14ad40e 100644 --- a/src/dlt/gateway/src/main/kotlin/Main.kt +++ b/src/dlt/gateway/src/main/kotlin/Main.kt @@ -12,7 +12,7 @@ // duplication of the object or source code - either totally or in // part - is strictly prohibited. // -// Copyright (c) 2021 NEC Laboratories Europe GmbH +// Copyright (c) 2022 NEC Laboratories Europe GmbH // All Rights Reserved. // // Authors: Konstantin Munichev @@ -35,107 +35,127 @@ // // THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
-import io.ktor.client.* -import io.ktor.client.engine.cio.* -import io.ktor.client.features.* -import io.ktor.client.request.* -import io.ktor.utils.io.jvm.javaio.* -import kotlinx.serialization.ExperimentalSerializationApi -import proto.Config -import proto.Config.DltConfig - -@OptIn(ExperimentalSerializationApi::class) -suspend fun main(args: Array) { - // TODO: default configuration file - val cfg = DltConfig.newBuilder().setWallet("wallet").setConnectionFile("config/connection-org1.json") - .setUser("appUser") - .setChannel("dlt") - .setContract("basic").setCaCertFile("config/ca.org1.example.com-cert.pem").setCaUrl("https://s2:7054") - .setCaAdmin("admin").setCaAdminSecret("adminpw").setMsp("Org1MSP").setAffiliation("org1.department1") - .build() - val cfgBytes = cfg.toByteArray() +import context.ContextOuterClass +import io.grpc.ManagedChannel +import io.grpc.ManagedChannelBuilder +import kotlinx.coroutines.GlobalScope +import kotlinx.coroutines.launch +import kotlinx.coroutines.runBlocking +import dlt.DltGateway +import dlt.DltGatewayServiceGrpcKt +import java.io.Closeable +import java.util.* +import java.util.concurrent.TimeUnit - val client = HttpClient(CIO) { - HttpResponseValidator { - validateResponse { response -> - println(response.status) - } - } - } +class DltServiceClient(private val channel: ManagedChannel) : Closeable { + private val stub: DltGatewayServiceGrpcKt.DltGatewayServiceCoroutineStub = + DltGatewayServiceGrpcKt.DltGatewayServiceCoroutineStub(channel) - try { - client.post("http://localhost:8080/dlt/configure") { - body = cfgBytes - } - } catch (e: ClientRequestException) { - println(e.response.status) - println(String(e.response.content.toInputStream().readAllBytes())) + suspend fun putData(data: DltGateway.DltRecord) { + println("Sending record ${data.recordId}...") + val response = stub.recordToDlt(data) + println("Response: ${response.recordId}") } - try { - val config = client.get("http://localhost:8080/dlt/configure") - println(DltConfig.parseFrom(config)) - } catch (e: ClientRequestException) { - println(e.response.status) - println(String(e.response.content.toInputStream().readAllBytes())) + suspend fun getData(id: DltGateway.DltRecordId) { + println("Requesting record $id...") + val response = stub.getFromDlt(id) + println("Got data: $response") } - val uuid = "41f4d2e2-f4ef-4c81-872a-c32f2d26b2ca" - try { - val record = client.get("http://localhost:8080/dlt/record") { - body = uuid + fun subscribe(filter: DltGateway.DltRecordSubscription) { + val subscription = stub.subscribeToDlt(filter) + GlobalScope.launch { + subscription.collect { + println("Got subscription event") + println(it) + } } - println(Config.DltRecord.parseFrom(record)) - } catch (e: ClientRequestException) { - println(e.response.status) - println(String(e.response.content.toInputStream().readAllBytes())) } - val id = Config.DltRecordId.newBuilder().setUuid(uuid).build() - val record = Config.DltRecord.newBuilder().setId(id).setOperation(Config.DltRecordOperation.ADD) - .setType(Config.DltRecordType.DEVICE).setJson("{}").build() - try { - val result = client.post("http://localhost:8080/dlt/record") { - body = record.toByteArray() - } - println(String(result)) - val requestedRecord = client.get("http://localhost:8080/dlt/record") { - body = uuid - } - println(Config.DltRecord.parseFrom(requestedRecord)) - } catch (e: ClientRequestException) { - println(e.response.status) - println(String(e.response.content.toInputStream().readAllBytes())) + override fun close() { + 
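+        // Give in-flight RPCs up to five seconds to complete before the channel is closed.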
channel.shutdown().awaitTermination(5, TimeUnit.SECONDS) } +} - try { - val newRecord = Config.DltRecord.newBuilder().setId(id).setOperation(Config.DltRecordOperation.UPDATE) - .setType(Config.DltRecordType.UNKNOWN).setJson("{}").build() - val result = client.post("http://localhost:8080/dlt/record") { - body = newRecord.toByteArray() - } - println(String(result)) - val requestedRecord = client.get("http://localhost:8080/dlt/record") { - body = uuid - } - println(Config.DltRecord.parseFrom(requestedRecord)) - } catch (e: ClientRequestException) { - println(e.response.status) - println(String(e.response.content.toInputStream().readAllBytes())) - } +fun main() = runBlocking { + val port = 50051 + val channel = ManagedChannelBuilder.forAddress("localhost", port).usePlaintext().build() + + val client = DltServiceClient(channel) + + val domainUuid = UUID.randomUUID().toString() + val recordUuid = UUID.randomUUID().toString() + println("New domain uuid $domainUuid") + println("New record uuid $recordUuid") + + val id = DltGateway.DltRecordId.newBuilder() + .setDomainUuid( + ContextOuterClass.Uuid.newBuilder() + .setUuid(domainUuid) + ) + .setRecordUuid( + ContextOuterClass.Uuid.newBuilder() + .setUuid(recordUuid) + ) + .setType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_SERVICE) + .build() + + val subscription = DltGateway.DltRecordSubscription.newBuilder() + .addType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_CONTEXT) + .addType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_LINK) + .addType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_SERVICE) + .addOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_ADD) + .addOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE) + .addOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_DELETE) + .build() + + client.subscribe(subscription) + + Thread.sleep(5000) + + val data = DltGateway.DltRecord.newBuilder() + .setRecordId(id) + .setOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_ADD) + .setDataJson("\"{\"device_config\": {\"config_rules\": []}, \"device_drivers\": []," + + "\"device_endpoints\": [], \"device_id\": {\"device_uuid\": {\"uuid\": \"dev-12345\"}}," + + "\"device_operational_status\": \"DEVICEOPERATIONALSTATUS_ENABLED\"," + + "\"device_type\": \"packet-router\"}\", \"operation\": \"DLTRECORDOPERATION_ADD\"," + + "\"record_id\": {\"domain_uuid\": {\"uuid\": \"tfs-a\"}, \"record_uuid\": {\"uuid\": \"dev-12345\"}," + + "\"type\": \"DLTRECORDTYPE_DEVICE\"}") + .build() + + println("sending new record") + client.putData(data) + client.getData(id) + + Thread.sleep(5000) + + val updateData = DltGateway.DltRecord.newBuilder() + .setRecordId(id) + .setOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE) + .setDataJson("{\"name\": \"test\"}") + .build() + + println("updating record") + client.putData(updateData) + client.getData(id) + + Thread.sleep(5000) + + val removeData = DltGateway.DltRecord.newBuilder() + .setRecordId(id) + .setOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_DELETE) + .setDataJson("{\"name\": \"test\"}") + .build() + + println("removing record") + client.putData(removeData) try { - val newRecord = Config.DltRecord.newBuilder().setId(id).setOperation(Config.DltRecordOperation.DISABLE).build() - val result = client.post("http://localhost:8080/dlt/record") { - body = newRecord.toByteArray() - } - println(String(result)) - val requestedRecord = client.get("http://localhost:8080/dlt/record") { - body = uuid - } - 
println(Config.DltRecord.parseFrom(requestedRecord))
-    } catch (e: ClientRequestException) {
-        println(e.response.status)
-        println(String(e.response.content.toInputStream().readAllBytes()))
+        client.getData(id)
+    } catch (e: Exception) {
+        println(e.toString())
     }
-}
\ No newline at end of file
+    Thread.sleep(5000)
+}
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt b/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt
index 245bd4828776837802a1303787d5cfc34a5bffbc..00ec40d57dcd8bc4da18f30a6bed6f1d2a032b48 100644
--- a/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt
+++ b/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt
@@ -12,7 +12,7 @@
 // duplication of the object or source code - either totally or in
 // part - is strictly prohibited.
 //
-// Copyright (c) 2021 NEC Laboratories Europe GmbH
+// Copyright (c) 2022 NEC Laboratories Europe GmbH
 // All Rights Reserved.
 //
 // Authors: Konstantin Munichev
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt b/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt
index d7c163954ec4f1b63b5e646e714d493050e114c0..af6592be93c86e316a64cd23edd46bbbdc240cfd 100644
--- a/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt
+++ b/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt
@@ -12,7 +12,7 @@
 // duplication of the object or source code - either totally or in
 // part - is strictly prohibited.
 //
-// Copyright (c) 2021 NEC Laboratories Europe GmbH
+// Copyright (c) 2022 NEC Laboratories Europe GmbH
 // All Rights Reserved.
 //
 // Authors: Konstantin Munichev
@@ -37,6 +37,11 @@ package fabric
+import context.ContextOuterClass
+import dlt.DltGateway.DltRecord
+import dlt.DltGateway.DltRecordEvent
+import kotlinx.coroutines.channels.Channel
+import kotlinx.coroutines.runBlocking
 import org.hyperledger.fabric.gateway.Contract
 import org.hyperledger.fabric.gateway.ContractEvent
 import org.hyperledger.fabric.gateway.Wallet
@@ -53,6 +58,11 @@ class FabricConnector(val config: Config.DltConfig) {
     private val wallet: Wallet
     private val contract: Contract
+    private val channels: MutableList<Channel<DltRecordEvent>> = mutableListOf()
+
+    private val encoder: Base64.Encoder = Base64.getEncoder()
+    private val decoder: Base64.Decoder = Base64.getDecoder()
+
     init {
         // Create a CA client for interacting with the CA.
         val props = Properties()
@@ -65,7 +75,42 @@ class FabricConnector(val config: Config.DltConfig) {
         // Create a wallet for managing identities
         wallet = Wallets.newFileSystemWallet(Paths.get(config.wallet))
         contract = connect()
-        subscribeForEvents()
+
+        fabricSubscribe()
+    }
+
+    private fun fabricSubscribe() {
+        val consumer = Consumer { event: ContractEvent?
+            -> run {
+                println("new event detected")
+                val record = DltRecord.parseFrom(decoder.decode(event?.payload?.get()))
+                println(record.recordId.recordUuid)
+                val eventType: ContextOuterClass.EventTypeEnum = when (event?.name) {
+                    "Add" -> ContextOuterClass.EventTypeEnum.EVENTTYPE_CREATE
+                    "Update" -> ContextOuterClass.EventTypeEnum.EVENTTYPE_UPDATE
+                    "Remove" -> ContextOuterClass.EventTypeEnum.EVENTTYPE_REMOVE
+                    else -> ContextOuterClass.EventTypeEnum.EVENTTYPE_UNDEFINED
+                }
+                val pbEvent = DltRecordEvent.newBuilder()
+                    .setEvent(
+                        ContextOuterClass.Event.newBuilder()
+                            .setTimestamp(
+                                ContextOuterClass.Timestamp.newBuilder()
+                                    .setTimestamp(System.currentTimeMillis().toDouble())
+                            )
+                            .setEventType(eventType)
+                    )
+                    .setRecordId(record.recordId)
+                    .build()
+
+                runBlocking {
+                    channels.forEach {
+                        it.trySend(pbEvent)
+                    }
+                }
+            }
+        }
+        contract.addContractListener(consumer)
     }
     fun connect(): Contract {
@@ -74,44 +119,60 @@ class FabricConnector(val config: Config.DltConfig) {
         return getContract(config, wallet)
     }
-    fun putData(record: Config.DltRecord): String {
-        println(record.type.toString())
-        return String(
+    fun putData(record: DltRecord): String {
+        println(record.toString())
+
+        try {
             contract.submitTransaction(
                 "AddRecord",
-                record.id.uuid,
-                record.type.number.toString(),
-                record.json
+                record.recordId.recordUuid.uuid,
+                encoder.encodeToString(record.toByteArray())
             )
-        )
+        } catch (e: Exception) {
+            println(e.toString())
+            return e.toString()
+        }
+        return ""
     }
-    fun getData(uuid: String): Config.DltRecord {
-        val result = contract.evaluateTransaction("GetRecord", uuid)
-        return Config.DltRecord.parseFrom(result)
+    fun getData(uuid: String): DltRecord {
+        return try {
+            val result = contract.evaluateTransaction("GetRecord", uuid)
+            DltRecord.parseFrom(decoder.decode(result))
+        } catch (e: Exception) {
+            println(e.toString())
+            DltRecord.getDefaultInstance()
+        }
     }
-    fun updateData(record: Config.DltRecord): String {
-        return String(
+    fun updateData(record: DltRecord): String {
+        try {
             contract.submitTransaction(
                 "UpdateRecord",
-                record.id.uuid,
-                record.type.number.toString(),
-                record.json
+                record.recordId.recordUuid.uuid,
+                encoder.encodeToString(record.toByteArray())
             )
-        )
+        } catch (e: Exception) {
+            return e.toString()
+        }
+        return ""
     }
-    fun deleteData(uuid: String): String {
-        return String(contract.submitTransaction("DeactivateRecord", uuid))
+    fun deleteData(record: DltRecord): String {
+        try {
+            contract.submitTransaction(
+                "DeleteRecord",
+                record.recordId.recordUuid.uuid,
+            )
+        } catch (e: Exception) {
+            return e.toString()
+        }
+        return ""
     }
-    private fun subscribeForEvents() {
-        val consumer = Consumer {
-            event: ContractEvent? -> run {
-                println(event?.payload?.get()?.let { String(it) })
-            }
-        }
-        contract.addContractListener(consumer)
+    fun subscribeForEvents(): Channel<DltRecordEvent> {
+        val produceCh = Channel<DltRecordEvent>()
+        channels.add(produceCh)
+        return produceCh
     }
 }
\ No newline at end of file
diff --git a/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt b/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9b4e1f4dc38d80c22847ae213053119b301bdf3d
--- /dev/null
+++ b/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt
@@ -0,0 +1,94 @@
+// NEC Laboratories Europe GmbH
+//
+// PROPRIETARY INFORMATION
+//
+// The software and its source code contain valuable trade secrets and
+// shall be maintained in confidence and treated as confidential
+// information.
The software may only be used for evaluation and/or +// testing purposes, unless otherwise explicitly stated in a written +// agreement with NEC Laboratories Europe GmbH. +// +// Any unauthorized publication, transfer to third parties or +// duplication of the object or source code - either totally or in +// part - is strictly prohibited. +// +// Copyright (c) 2022 NEC Laboratories Europe GmbH +// All Rights Reserved. +// +// Authors: Konstantin Munichev +// +// +// NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE +// WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND +// THE ACCOMPANYING DOCUMENTATION. +// +// NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC +// Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR +// ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR +// LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF +// INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL, +// INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF +// OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe +// GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +// +// THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. + +package grpc + +import fabric.FabricConnector +import io.grpc.Server +import io.grpc.ServerBuilder +import proto.Config +import kotlin.random.Random +import kotlin.random.nextUInt + +class FabricServer(val port: Int) { + private val server: Server + + init { + val id = Random.nextUInt() + val cfg = Config.DltConfig.newBuilder().setWallet("wallet$id").setConnectionFile("config/connection-org1.json") + .setUser("appUser$id") + .setChannel("dlt") + .setContract("basic").setCaCertFile("config/ca.org1.example.com-cert.pem").setCaUrl("https://teraflow.nlehd.de:7054") + .setCaAdmin("admin").setCaAdminSecret("adminpw").setMsp("Org1MSP").setAffiliation("org1.department1") + .build() + val connector = FabricConnector(cfg) + + val dltService = DLTService(connector) + server = ServerBuilder + .forPort(port) + .addService(dltService) + .build() + + } + + fun start() { + server.start() + println("Server started, listening on $port") + Runtime.getRuntime().addShutdownHook( + Thread { + println("Shutting down...") + this@FabricServer.stop() + println("Server shut down") + } + ) + } + + private fun stop() { + server.shutdown() + } + + fun blockUntilShutdown() { + server.awaitTermination() + } +} + +fun main() { + val port = 50051 + val server = FabricServer(port) + server.start() + server.blockUntilShutdown() +} diff --git a/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt b/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt new file mode 100644 index 0000000000000000000000000000000000000000..d39c24a1a87aacb32d828dcba8208b34312d7409 --- /dev/null +++ b/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt @@ -0,0 +1,95 @@ +// NEC Laboratories Europe GmbH +// +// PROPRIETARY INFORMATION +// +// The software and its source code contain valuable trade secrets and +// shall be maintained in confidence and treated as confidential +// information. The software may only be used for evaluation and/or +// testing purposes, unless otherwise explicitly stated in a written +// agreement with NEC Laboratories Europe GmbH. 
+//
+// Any unauthorized publication, transfer to third parties or
+// duplication of the object or source code - either totally or in
+// part - is strictly prohibited.
+//
+// Copyright (c) 2022 NEC Laboratories Europe GmbH
+// All Rights Reserved.
+//
+// Authors: Konstantin Munichev
+//
+//
+// NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE
+// WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND
+// THE ACCOMPANYING DOCUMENTATION.
+//
+// NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC
+// Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR
+// ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR
+// LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
+// INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL,
+// INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF
+// OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe
+// GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+//
+// THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+
+package grpc
+
+import fabric.FabricConnector
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.consumeAsFlow
+import context.ContextOuterClass
+import dlt.DltGateway
+import dlt.DltGatewayServiceGrpcKt
+
+class DLTService(private val connector: FabricConnector) :
+    DltGatewayServiceGrpcKt.DltGatewayServiceCoroutineImplBase() {
+    override suspend fun recordToDlt(request: DltGateway.DltRecord): DltGateway.DltRecordStatus {
+        println("Incoming request ${request.recordId.recordUuid}")
+        val error = when (request.operation) {
+            DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_ADD -> {
+                println("Adding new record")
+                connector.putData(request)
+            }
+            DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE -> {
+                println("Updating record")
+                connector.updateData(request)
+            }
+            DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_DELETE -> {
+                println("Deleting record")
+                connector.deleteData(request)
+            }
+            else -> "Undefined or unknown operation"
+        }
+
+        val dltStatusEnum: DltGateway.DltRecordStatusEnum = if (error == "") {
+            DltGateway.DltRecordStatusEnum.DLTRECORDSTATUS_SUCCEEDED
+        } else {
+            DltGateway.DltRecordStatusEnum.DLTRECORDSTATUS_FAILED
+        }
+        return DltGateway.DltRecordStatus.newBuilder()
+            .setRecordId(request.recordId)
+            .setStatus(dltStatusEnum)
+            .setErrorMessage(error)
+            .build()
+    }
+
+    override suspend fun getFromDlt(request: DltGateway.DltRecordId): DltGateway.DltRecord {
+        return connector.getData(request.recordUuid.uuid)
+    }
+
+    override fun subscribeToDlt(request: DltGateway.DltRecordSubscription): Flow<DltGateway.DltRecordEvent> {
+        println("Subscription request: $request")
+        return connector.subscribeForEvents().consumeAsFlow()
+    }
+
+    override suspend fun getDltStatus(request: ContextOuterClass.TeraFlowController): DltGateway.DltPeerStatus {
+        return super.getDltStatus(request)
+    }
+
+    override suspend fun getDltPeers(request: ContextOuterClass.Empty): DltGateway.DltPeerStatusList {
+        return super.getDltPeers(request)
+    }
+}
\ No newline at end of file
diff --git a/src/dlt/gateway/src/main/kotlin/http/Server.kt b/src/dlt/gateway/src/main/kotlin/http/Server.kt
deleted file mode 100644
index 4e3400af36b32726096b177da230c8baa4bb3dab..0000000000000000000000000000000000000000
--- a/src/dlt/gateway/src/main/kotlin/http/Server.kt
+++ /dev/null
@@ -1,162 +0,0 @@
-// NEC
Laboratories Europe GmbH -// -// PROPRIETARY INFORMATION -// -// The software and its source code contain valuable trade secrets and -// shall be maintained in confidence and treated as confidential -// information. The software may only be used for evaluation and/or -// testing purposes, unless otherwise explicitly stated in a written -// agreement with NEC Laboratories Europe GmbH. -// -// Any unauthorized publication, transfer to third parties or -// duplication of the object or source code - either totally or in -// part - is strictly prohibited. -// -// Copyright (c) 2021 NEC Laboratories Europe GmbH -// All Rights Reserved. -// -// Authors: Konstantin Munichev -// -// -// NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES -// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE -// WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND -// THE ACCOMPANYING DOCUMENTATION. -// -// NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC -// Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR -// ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR -// LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF -// INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL, -// INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF -// OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe -// GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. -// -// THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. - -package http - -import fabric.FabricConnector -import io.ktor.application.* -import io.ktor.features.* -import io.ktor.http.* -import io.ktor.request.* -import io.ktor.response.* -import io.ktor.routing.* -import io.ktor.server.engine.* -import io.ktor.server.netty.* -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.sync.Mutex -import kotlinx.coroutines.withContext -import proto.Config -import proto.Config.DltConfig -import proto.Config.DltRecord - -class Server { - var connector: FabricConnector? = null - val port = 8080 - val mutex = Mutex() -} - -fun checkException(e: Exception): String { - if (e.message == null) return "" - return e.message!! 
-} - -fun main() { - val server = Server() - embeddedServer(Netty, port = server.port) { - install(ContentNegotiation) - routing { - post("/dlt/configure") { - withContext(Dispatchers.IO) { - try { - val data = call.receiveStream() - val config = DltConfig.parseFrom(data) - println(config) - server.mutex.lock() - server.connector = FabricConnector(config) - server.mutex.unlock() - call.response.status(HttpStatusCode.Created) - } - // TODO: catch exceptions one by one - catch (e: Exception) { - call.respond(HttpStatusCode.BadRequest, checkException(e)) - e.printStackTrace() - } - } - } - get("/dlt/configure") { - withContext(Dispatchers.IO) { - server.mutex.lock() - if (server.connector == null) { - server.mutex.unlock() - call.respond(HttpStatusCode.NotFound, "Not initialized") - } else { - val configBytes = server.connector!!.config.toByteArray() - server.mutex.unlock() - call.respond(HttpStatusCode.OK, configBytes) - } - } - } - post("/dlt/record") { - withContext(Dispatchers.IO) { - server.mutex.lock() - try { - if (server.connector == null) { - call.respond(HttpStatusCode.NotFound, "Not initialized") - } else { - val record = DltRecord.parseFrom(call.receiveStream()) - when (record.operation) { - Config.DltRecordOperation.ADD -> { - val result = server.connector!!.putData(record) - call.respond(HttpStatusCode.Created, result) - } - Config.DltRecordOperation.UPDATE -> { - val result = server.connector!!.updateData(record) - call.respond(HttpStatusCode.OK, result) - } - // TODO: Disable should require only uuid - Config.DltRecordOperation.DISABLE -> { - val result = server.connector!!.deleteData(record.id.uuid) - call.respond(HttpStatusCode.OK, result) - } - else -> { - call.respond(HttpStatusCode.BadRequest, "Invalid operation") - } - } - } - } - // TODO: catch exceptions one by one - catch (e: Exception) { - call.respond(HttpStatusCode.BadRequest, checkException(e)) - e.printStackTrace() - } - server.mutex.unlock() - } - } - get("/dlt/record") { - withContext(Dispatchers.IO) { - server.mutex.lock() - try { - if (server.connector == null) { - call.respond(HttpStatusCode.NotFound) - } else { - val uuid = call.receiveText() - println("Uuid request: $uuid") - val result = server.connector!!.getData(uuid) - call.respond(HttpStatusCode.OK, result.toByteArray()) - } - } - // TODO: catch exceptions one by one - catch (e: Exception) { - call.respond(HttpStatusCode.NotFound, checkException(e)) - e.printStackTrace() - } - server.mutex.unlock() - } - } - } - }.start(wait = true) -} diff --git a/src/dlt/gateway/src/main/kotlin/proto/Config.proto b/src/dlt/gateway/src/main/kotlin/proto/Config.proto index f492e63ce65924a98b38ea4925d43336f84d211c..b6d4c5614dca4b7784b96ebdb1d002f85c4fd0e2 100644 --- a/src/dlt/gateway/src/main/kotlin/proto/Config.proto +++ b/src/dlt/gateway/src/main/kotlin/proto/Config.proto @@ -12,7 +12,7 @@ // duplication of the object or source code - either totally or in // part - is strictly prohibited. // -// Copyright (c) 2021 NEC Laboratories Europe GmbH +// Copyright (c) 2022 NEC Laboratories Europe GmbH // All Rights Reserved. 
// // Authors: Konstantin Munichev @@ -52,29 +52,3 @@ message DltConfig { string msp = 10; string affiliation = 11; } - -message DltRecordId { - string uuid = 1; -} - -enum DltRecordOperation { - OP_UNSET = 0; - ADD = 1; - UPDATE = 2; - DISABLE = 3; -} - -enum DltRecordType { - RECORD_UNSET = 0; - UNKNOWN = 1; - SERVICE = 2; - DEVICE = 3; - SLICE = 4; -} - -message DltRecord { - DltRecordId id = 1; - DltRecordOperation operation = 2; - DltRecordType type = 3; - string json = 4; -} \ No newline at end of file diff --git a/src/dlt/mock_blockchain/Dockerfile b/src/dlt/mock_blockchain/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..22199b5f8f442e6d4617a2aed2e1dec9ad13e31a --- /dev/null +++ b/src/dlt/mock_blockchain/Dockerfile @@ -0,0 +1,68 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/mock_blockchain +WORKDIR /var/teraflow/mock_blockchain +COPY src/dlt/mock_blockchain/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/dlt/mock_blockchain/. 
mock_blockchain + +# Start the service +ENTRYPOINT ["python", "-m", "mock_blockchain.service"] diff --git a/src/dlt/mock_blockchain/__init__.py b/src/dlt/mock_blockchain/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/dlt/mock_blockchain/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/dlt/mock_blockchain/requirements.in b/src/dlt/mock_blockchain/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/dlt/mock_blockchain/service/__init__.py b/src/dlt/mock_blockchain/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/dlt/mock_blockchain/service/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/dlt/mock_blockchain/service/__main__.py b/src/dlt/mock_blockchain/service/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..359c6990addfcd9278496338c50320c152c1810f --- /dev/null +++ b/src/dlt/mock_blockchain/service/__main__.py @@ -0,0 +1,61 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, signal, sys, threading +from common.Constants import ServiceNameEnum +from common.Settings import get_log_level, get_service_port_grpc +from common.proto.dlt_gateway_pb2_grpc import add_DltGatewayServiceServicer_to_server +from common.tests.MockServicerImpl_DltGateway import MockServicerImpl_DltGateway +from common.tools.service.GenericGrpcService import GenericGrpcService + +terminate = threading.Event() + +logging.basicConfig(level=get_log_level()) +LOGGER = logging.getLogger(__name__) + +class MockDltGatewayService(GenericGrpcService): + def __init__(self, cls_name: str = 'MockDltGatewayService') -> None: + port = get_service_port_grpc(ServiceNameEnum.DLT_GATEWAY) + super().__init__(port, cls_name=cls_name) + self.dltgateway_servicer = MockServicerImpl_DltGateway() + + # pylint: disable=attribute-defined-outside-init + def install_servicers(self): + add_DltGatewayServiceServicer_to_server(self.dltgateway_servicer, self.server) + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + terminate.set() + +def main(): + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Starting Mock DLT gateway service + grpc_service = MockDltGatewayService() + grpc_service.start() + + # Wait for Ctrl+C or termination signal + while not terminate.wait(timeout=0.1): pass + + LOGGER.info('Terminating...') + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main())
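
The mock exposes the same `DltGatewayService` gRPC API as the Kotlin gateway above, so the sample client from `src/dlt/gateway/src/main/kotlin/Main.kt` can be run against it unchanged. The following is a minimal, hypothetical connectivity sketch (not part of this patch; the mock's actual port comes from `get_service_port_grpc(ServiceNameEnum.DLT_GATEWAY)`, so the 50051 below is an assumption matching the sample client):

```kotlin
import io.grpc.ConnectivityState
import io.grpc.ManagedChannelBuilder
import java.util.concurrent.TimeUnit

fun main() {
    // Build a plaintext channel towards the (assumed) mock gateway port.
    val channel = ManagedChannelBuilder.forAddress("localhost", 50051).usePlaintext().build()
    val state: ConnectivityState = channel.getState(true) // true requests a connection attempt
    println("Channel state towards mock gateway: $state")
    channel.shutdown().awaitTermination(5, TimeUnit.SECONDS)
}
```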